
1# http://pyrocko.org - GPLv3 

2# 

3# The Pyrocko Developers, 21st Century 

4# ---|P------/S----------~Lg---------- 

5''' 

6Utility functions for Pyrocko. 

7 

8.. _time-handling-mode: 

9 

10High precision time handling mode 

11................................. 

12 

13Pyrocko can treat timestamps either as standard double precision (64 bit) 

14floating point values, or as high precision floats (``numpy.float128`` or 

15``numpy.float96``, whichever is available, `see NumPy Scalars 

16<https://numpy.org/doc/stable/reference/arrays.scalars.html>`_), aliased here 

17as :py:class:`~pyrocko.util.hpfloat`. High precision time stamps are required 

18when handling data with sub-millisecond precision, i.e. kHz/MHz data streams 

19and event catalogs derived from such data. 

20 

21Not all functions in Pyrocko and in programs depending on Pyrocko may work 

22correctly with high precision times. Therefore, Pyrocko's high precision time 

23 handling mode has to be explicitly activated by user config, command line option

24or enforced within a certain script/program. 

25 

26The default high precision time handling mode can be configured globally with 

27the user configuration variable 

28:py:gattr:`~pyrocko.config.PyrockoConfig.use_high_precision_time`. Calling the 

29function :py:func:`use_high_precision_time` overrides the default from the 

30config file. This function may be called at startup of a program/script 

31requiring a specific time handling mode. 

32 

33To create a valid time stamp for use in Pyrocko (e.g. in 

34:py:class:`~pyrocko.model.event.Event` or 

35:py:class:`~pyrocko.trace.Trace` objects), use: 

36 

37.. code-block:: python

38 

39 import time 

40 from pyrocko import util 

41 

42 # By default using mode selected in user config, override with: 

43 # util.use_high_precision_time(True) # force high precision mode 

44 # util.use_high_precision_time(False) # force low precision mode 

45 

46 t1 = util.str_to_time('2020-08-27 10:22:00') 

47 t2 = util.str_to_time('2020-08-27 10:22:00.111222') 

48 t3 = util.to_time_float(time.time()) 

49 

50 # To get the appropriate float class, use: 

51 

52 time_float = util.get_time_float() 

53 # -> float, numpy.float128 or numpy.float96 

54 [isinstance(t, time_float) for t in [t1, t2, t3]] 

55 # -> [True, True, True] 

56 

57 # Shortcut: 

58 util.check_time_class(t1) 

59 

60Module content 

61.............. 

62 

63.. py:class:: hpfloat 

64 

65 Alias for NumPy's high precision float data type ``float128`` or 

66 ``float96``, if available. 

67 

68 On platforms lacking support for high precision floats, an attempt to 

69 create a ``hpfloat`` instance raises :py:exc:`HPFloatUnavailable`.

70 

71''' 

72 

73import time 

74import logging 

75import os 

76import sys 

77import re 

78import calendar 

79import math 

80import fnmatch 

81import inspect 

82import weakref 

83try: 

84 import fcntl 

85except ImportError: 

86 fcntl = None 

87import optparse 

88import os.path as op 

89import errno 

90 

91import numpy as num 

92from scipy import signal 

93import pyrocko 

94from pyrocko import dummy_progressbar 

95 

96 

97from urllib.parse import urlencode, quote, unquote # noqa 

98from urllib.request import Request, build_opener, HTTPDigestAuthHandler # noqa 

99from urllib.request import urlopen as _urlopen # noqa 

100from urllib.error import HTTPError, URLError # noqa 

101 

102 

103try: 

104 import certifi 

105 import ssl 

106 g_ssl_context = ssl.create_default_context(cafile=certifi.where()) 

107except ImportError: 

108 g_ssl_context = None 

109 

110 

111class URLErrorSSL(URLError): 

112 

113 def __init__(self, urlerror): 

114 self.__dict__ = urlerror.__dict__.copy() 

115 

116 def __str__(self): 

117 return ( 

118 'Requesting web resource failed and the problem could be ' 

119 'related to SSL. Python standard libraries on some older ' 

120 'systems (like Ubuntu 14.04) are known to have trouble ' 

121 "with some SSL setups of today's servers: %s" 

122 % URLError.__str__(self)) 

123 

124 

125def urlopen_ssl_check(*args, **kwargs): 

126 try: 

127 return urlopen(*args, **kwargs) 

128 except URLError as e: 

129 if str(e).find('SSL') != -1: 

130 raise URLErrorSSL(e) 

131 else: 

132 raise 

133 

134 

135def urlopen(*args, **kwargs): 

136 

137 if 'context' not in kwargs and g_ssl_context is not None: 

138 kwargs['context'] = g_ssl_context 

139 

140 return _urlopen(*args, **kwargs) 

141 

142 

143try: 

144 long 

145except NameError: 

146 long = int 

147 

148 

149force_dummy_progressbar = False 

150 

151 

152try: 

153 from pyrocko import util_ext 

154except ImportError: 

155 util_ext = None 

156 

157 

158logger = logging.getLogger('pyrocko.util') 

159 

160 

161# Fallbacks num_full and num_full_like are no longer needed, but are

162# kept here because downstream code may still use them.

163try: 

164 num_full = num.full 

165except AttributeError: 

166 def num_full(shape, fill_value, dtype=None, order='C'): 

167 a = num.empty(shape, dtype=dtype, order=order) 

168 a.fill(fill_value) 

169 return a 

170 

171try: 

172 num_full_like = num.full_like 

173except AttributeError: 

174 def num_full_like(arr, fill_value, dtype=None, order='K', subok=True): 

175 a = num.empty_like(arr, dtype=dtype, order=order, subok=subok) 

176 a.fill(fill_value) 

177 return a 

178 

179 

180g_setup_logging_args = 'pyrocko', 'warning' 

181 

182 

183def setup_logging(programname='pyrocko', levelname='warning'): 

184 ''' 

185 Initialize logging. 

186 

187 :param programname: program name to be written in log 

188 :param levelname: string indicating the logging level ('debug', 'info', 

189 'warning', 'error', 'critical') 

190 

191 This function is called at startup by most pyrocko programs to set up a 

192 consistent logging format. This is simply a shortcut to a call to 

193 :py:func:`logging.basicConfig()`. 
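
    A minimal usage sketch (program name and level are illustrative)::

        from pyrocko import util

        util.setup_logging('myprogram', 'info')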

194 ''' 

195 

196 global g_setup_logging_args 

197 g_setup_logging_args = (programname, levelname) 

198 

199 levels = {'debug': logging.DEBUG, 

200 'info': logging.INFO, 

201 'warning': logging.WARNING, 

202 'error': logging.ERROR, 

203 'critical': logging.CRITICAL} 

204 

205 logging.basicConfig( 

206 level=levels[levelname], 

207 format=programname+':%(name)-25s - %(levelname)-8s - %(message)s') 

208 

209 

210def subprocess_setup_logging_args(): 

211 ''' 

212 Get arguments from previous call to setup_logging. 

213 

214 These can be sent down to a worker process so it can setup its logging 

215 in the same way as the main process. 

216 ''' 

217 return g_setup_logging_args 

218 

219 

220def data_file(fn): 

221 return os.path.join(os.path.split(__file__)[0], 'data', fn) 

222 

223 

224class DownloadError(Exception): 

225 ''' 

226 Raised when a download failed. 

227 ''' 

228 pass 

229 

230 

231class PathExists(DownloadError): 

232 ''' 

233 Raised when the download target file already exists. 

234 ''' 

235 pass 

236 

237 

238def _download(url, fpath, username=None, password=None, 

239 force=False, method='download', stats=None, 

240 status_callback=None, entries_wanted=None, 

241 recursive=False, header=None): 

242 

243 import requests 

244 from requests.auth import HTTPBasicAuth 

245 from requests.exceptions import HTTPError as req_HTTPError 

246 

247 requests.adapters.DEFAULT_RETRIES = 5 

248 urljoin = requests.compat.urljoin 

249 

250 session = requests.Session() 

251 session.headers.update(header or {})

252 session.auth = None if username is None\ 

253 else HTTPBasicAuth(username, password) 

254 

255 status = { 

256 'ntotal_files': 0, 

257 'nread_files': 0, 

258 'ntotal_bytes_all_files': 0, 

259 'nread_bytes_all_files': 0, 

260 'ntotal_bytes_current_file': 0, 

261 'nread_bytes_current_file': 0, 

262 'finished': False 

263 } 

264 

265 try: 

266 url_to_size = {} 

267 

268 if callable(status_callback): 

269 status_callback(status) 

270 

271 if not recursive and url.endswith('/'): 

272 raise DownloadError( 

273 'URL: %s appears to be a directory' 

274 ' but recursive download is False' % url)

275 

276 if recursive and not url.endswith('/'): 

277 url += '/' 

278 

279 def parse_directory_tree(url, subdir=''): 

280 r = session.get(urljoin(url, subdir)) 

281 r.raise_for_status() 

282 

283 entries = re.findall(r'href="([a-zA-Z0-9_.-]+/?)"', r.text) 

284 

285 files = sorted(set(subdir + fn for fn in entries 

286 if not fn.endswith('/'))) 

287 

288 if entries_wanted is not None: 

289 files = [fn for fn in files

290 if any(fn in wanted for wanted in entries_wanted)]

291 

292 status['ntotal_files'] += len(files) 

293 

294 dirs = sorted(set(subdir + dn for dn in entries 

295 if dn.endswith('/') 

296 and dn not in ('./', '../'))) 

297 

298 for dn in dirs: 

299 files.extend(parse_directory_tree( 

300 url, subdir=dn)) 

301 

302 return files 

303 

304 def get_content_length(url): 

305 if url not in url_to_size: 

306 r = session.head(url, headers={'Accept-Encoding': ''}) 

307 

308 content_length = r.headers.get('content-length', None) 

309 if content_length is None: 

310 logger.debug('Could not get HTTP header ' 

311 'Content-Length for %s' % url) 

312 

313 content_length = None 

314 

315 else: 

316 content_length = int(content_length) 

317 status['ntotal_bytes_all_files'] += content_length 

318 if callable(status_callback): 

319 status_callback(status) 

320 

321 url_to_size[url] = content_length 

322 

323 return url_to_size[url] 

324 

325 def download_file(url, fn): 

326 logger.info('starting download of %s...' % url) 

327 ensuredirs(fn) 

328 

329 fsize = get_content_length(url) 

330 status['ntotal_bytes_current_file'] = fsize 

331 status['nread_bytes_current_file'] = 0 

332 if callable(status_callback): 

333 status_callback(status) 

334 

335 r = session.get(url, stream=True, timeout=5) 

336 r.raise_for_status() 

337 

338 frx = 0 

339 fn_tmp = fn + '.%i.temp' % os.getpid() 

340 with open(fn_tmp, 'wb') as f: 

341 for d in r.iter_content(chunk_size=1024): 

342 f.write(d) 

343 frx += len(d) 

344 

345 status['nread_bytes_all_files'] += len(d) 

346 status['nread_bytes_current_file'] += len(d) 

347 if callable(status_callback): 

348 status_callback(status) 

349 

350 os.rename(fn_tmp, fn) 

351 

352 if fsize is not None and frx != fsize: 

353 logger.warning( 

354 'HTTP header Content-Length: %i bytes does not match ' 

355 'download size %i bytes' % (fsize, frx)) 

356 

357 logger.info('finished download of %s' % url) 

358 

359 status['nread_files'] += 1 

360 if callable(status_callback): 

361 status_callback(status) 

362 

363 if recursive: 

364 if op.exists(fpath) and not force: 

365 raise PathExists('path %s already exists' % fpath) 

366 

367 files = parse_directory_tree(url) 

368 

369 dsize = 0 

370 for fn in files: 

371 file_url = urljoin(url, fn) 

372 dsize += get_content_length(file_url) or 0 

373 

374 if method == 'calcsize': 

375 return dsize 

376 

377 else: 

378 for fn in files: 

379 file_url = urljoin(url, fn) 

380 download_file(file_url, op.join(fpath, fn)) 

381 

382 else: 

383 status['ntotal_files'] += 1 

384 if callable(status_callback): 

385 status_callback(status) 

386 

387 fsize = get_content_length(url) 

388 if method == 'calcsize': 

389 return fsize 

390 else: 

391 download_file(url, fpath) 

392 

393 except req_HTTPError as e: 

394 logging.warning('http error: %s' % e) 

395 raise DownloadError('could not download file(s) from: %s' % url) 

396 

397 finally: 

398 status['finished'] = True 

399 if callable(status_callback): 

400 status_callback(status) 

401 session.close() 

402 

403 

404def download_file( 

405 url, fpath, username=None, password=None, status_callback=None, 

406 **kwargs): 

407 return _download( 

408 url, fpath, username, password, 

409 recursive=False, 

410 status_callback=status_callback, 

411 **kwargs) 

412 

413 

414def download_dir( 

415 url, fpath, username=None, password=None, status_callback=None, 

416 **kwargs): 

417 

418 return _download( 

419 url, fpath, username, password, 

420 recursive=True, 

421 status_callback=status_callback, 

422 **kwargs) 

423 

424 

425class HPFloatUnavailable(Exception): 

426 ''' 

427 Raised when a high precision float type would be required but is not 

428 available. 

429 ''' 

430 pass 

431 

432 

433class dummy_hpfloat(object): 

434 def __init__(self, *args, **kwargs): 

435 raise HPFloatUnavailable( 

436 'NumPy lacks support for float128 or float96 data type on this ' 

437 'platform.') 

438 

439 

440if hasattr(num, 'float128'): 

441 hpfloat = num.float128 

442 

443elif hasattr(num, 'float96'): 

444 hpfloat = num.float96 

445 

446else: 

447 hpfloat = dummy_hpfloat 

448 

449 

450g_time_float = None 

451g_time_dtype = None 

452 

453 

454class TimeFloatSettingError(Exception): 

455 pass 

456 

457 

458def use_high_precision_time(enabled): 

459 ''' 

460 Globally force a specific time handling mode. 

461 

462 See :ref:`High precision time handling mode <time-handling-mode>`. 

463 

464 :param enabled: enable/disable use of high precision time type 

465 :type enabled: bool 

466 

467 This function should be called before handling/reading any time data. 

468 It can only be called once. 

469 

470 Special attention is required when using multiprocessing on a platform 

471 which does not use fork under the hood. In such cases, the desired setting 

472 must also be applied in the subprocess.
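
    A minimal usage sketch::

        from pyrocko import util

        util.use_high_precision_time(True)  # call once, at program startup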

473 ''' 

474 _setup_high_precision_time_mode(enabled_app=enabled) 

475 

476 

477def _setup_high_precision_time_mode(enabled_app=False): 

478 global g_time_float 

479 global g_time_dtype 

480 

481 if not (g_time_float is None and g_time_dtype is None): 

482 raise TimeFloatSettingError( 

483 'Cannot set time handling mode: too late, it has already been ' 

484 'fixed by an earlier call.') 

485 

486 from pyrocko import config 

487 

488 conf = config.config() 

489 enabled_config = conf.use_high_precision_time 

490 

491 enabled_env = os.environ.get('PYROCKO_USE_HIGH_PRECISION_TIME', None) 

492 if enabled_env is not None: 

493 try: 

494 enabled_env = int(enabled_env) == 1 

495 except ValueError: 

496 raise TimeFloatSettingError( 

497 'Environment variable PYROCKO_USE_HIGH_PRECISION_TIME ' 

498 'should be set to 0 or 1.') 

499 

500 enabled = enabled_config 

501 mode_from = 'config variable `use_high_precision_time`' 

502 notify = enabled 

503 

504 if enabled_env is not None: 

505 if enabled_env != enabled: 

506 notify = True 

507 enabled = enabled_env 

508 mode_from = 'environment variable `PYROCKO_USE_HIGH_PRECISION_TIME`' 

509 

510 if enabled_app is not None: 

511 if enabled_app != enabled: 

512 notify = True 

513 enabled = enabled_app 

514 mode_from = 'application override' 

515 

516 logger.debug(''' 

517Pyrocko high precision time mode selection (later settings override earlier ones):

518 config: %s 

519 env: %s 

520 app: %s 

521 -> enabled: %s'''.lstrip() % ( 

522 enabled_config, enabled_env, enabled_app, enabled)) 

523 

524 if notify: 

525 logger.info('Pyrocko high precision time mode %s by %s.' % ( 

526 'activated' if enabled else 'deactivated', 

527 mode_from)) 

528 

529 if enabled: 

530 g_time_float = hpfloat 

531 g_time_dtype = hpfloat 

532 else: 

533 g_time_float = float 

534 g_time_dtype = num.float64 

535 

536 

537def get_time_float(): 

538 ''' 

539 Get the effective float class for timestamps. 

540 

541 See :ref:`High precision time handling mode <time-handling-mode>`. 

542 

543 :returns: :py:class:`float` or :py:class:`hpfloat`, depending on the 

544 current time handling mode 

545 ''' 

546 global g_time_float 

547 

548 if g_time_float is None: 

549 _setup_high_precision_time_mode() 

550 

551 return g_time_float 

552 

553 

554def get_time_dtype(): 

555 ''' 

556 Get effective NumPy float class to handle timestamps. 

557 

558 See :ref:`High precision time handling mode <time-handling-mode>`. 
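
    A minimal usage sketch::

        import numpy as num

        times = num.zeros(100, dtype=get_time_dtype())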

559 ''' 

560 

561 global g_time_dtype 

562 

563 if g_time_dtype is None: 

564 _setup_high_precision_time_mode() 

565 

566 return g_time_dtype 

567 

568 

569def to_time_float(t): 

570 ''' 

571 Convert float to valid time stamp in the current time handling mode. 

572 

573 See :ref:`High precision time handling mode <time-handling-mode>`. 

574 ''' 

575 return get_time_float()(t) 

576 

577 

578class TimestampTypeError(ValueError): 

579 pass 

580 

581 

582def check_time_class(t, error='raise'): 

583 ''' 

584 Type-check variable against current time handling mode. 

585 

586 See :ref:`High precision time handling mode <time-handling-mode>`. 
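
    A minimal usage sketch::

        t = str_to_time('2023-01-01 00:00:00')
        check_time_class(t)  # raises TimestampTypeError on a type mismatch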

587 ''' 

588 

589 if t == 0.0: 

590 return 

591 

592 if not isinstance(t, get_time_float()): 

593 message = ( 

594 'Timestamp %g is of type %s but should be of type %s with ' 

595 "Pyrocko's currently selected time handling mode.\n\n" 

596 'See https://pyrocko.org/docs/current/library/reference/util.html' 

597 '#high-precision-time-handling-mode' % ( 

598 t, type(t), get_time_float())) 

599 

600 if error == 'raise': 

601 raise TimestampTypeError(message) 

602 elif error == 'warn': 

603 logger.warning(message) 

604 else: 

605 assert False 

606 

607 

608class Stopwatch(object): 

609 ''' 

610 Simple stopwatch to measure elapsed wall clock time. 

611 

612 Usage:: 

613 

614 s = Stopwatch() 

615 time.sleep(1) 

616 print(s())

617 time.sleep(1)

618 print(s())

619 ''' 

620 

621 def __init__(self): 

622 self.start = time.time() 

623 

624 def __call__(self): 

625 return time.time() - self.start 

626 

627 

628def wrap(text, line_length=80): 

629 ''' 

630 Paragraph and list-aware wrapping of text. 

631 ''' 

632 

633 text = text.strip('\n') 

634 at_lineend = re.compile(r' *\n') 

635 at_para = re.compile(r'((^|(\n\s*)?\n)(\s+[*] )|\n\s*\n)') 

636 

637 paragraphs = at_para.split(text)[::5] 

638 listindents = at_para.split(text)[4::5] 

639 newlist = at_para.split(text)[3::5] 

640 

641 listindents[0:0] = [None] 

642 listindents.append(True) 

643 newlist.append(None) 

644 

645 det_indent = re.compile(r'^ *') 

646 

647 outlines = [] 

648 for ip, p in enumerate(paragraphs): 

649 if not p: 

650 continue 

651 

652 if listindents[ip] is None: 

653 _indent = det_indent.findall(p)[0] 

654 findent = _indent 

655 else: 

656 findent = listindents[ip] 

657 _indent = ' ' * len(findent) 

658 

659 ll = line_length - len(_indent) 

660 llf = ll 

661 

662 oldlines = [s.strip() for s in at_lineend.split(p.rstrip())] 

663 p1 = ' '.join(oldlines) 

664 possible = re.compile(r'(^.{1,%i}|.{1,%i})( |$)' % (llf, ll)) 

665 for imatch, match in enumerate(possible.finditer(p1)): 

666 parout = match.group(1) 

667 if imatch == 0: 

668 outlines.append(findent + parout) 

669 else: 

670 outlines.append(_indent + parout) 

671 

672 if ip != len(paragraphs)-1 and ( 

673 listindents[ip] is None 

674 or newlist[ip] is not None 

675 or listindents[ip+1] is None): 

676 

677 outlines.append('') 

678 

679 return outlines 

680 

681 

682def ewrap(lines, width=80, indent=''): 

683 lines = list(lines) 

684 if not lines: 

685 return '' 

686 fwidth = max(len(s) for s in lines) 

687 nx = max(1, (width-len(indent)) // (fwidth+1))

688 i = 0 

689 rows = [] 

690 while i < len(lines): 

691 rows.append(indent + ' '.join(x.ljust(fwidth) for x in lines[i:i+nx])) 

692 i += nx 

693 

694 return '\n'.join(rows) 

695 

696 

697class BetterHelpFormatter(optparse.IndentedHelpFormatter): 

698 

699 def __init__(self, *args, **kwargs): 

700 optparse.IndentedHelpFormatter.__init__(self, *args, **kwargs) 

701 

702 def format_option(self, option): 

703 ''' 

704 From IndentedHelpFormatter but using a different wrap method. 

705 ''' 

706 

707 help_text_position = 4 + self.current_indent 

708 help_text_width = self.width - help_text_position 

709 

710 opts = self.option_strings[option] 

711 opts = '%*s%s' % (self.current_indent, '', opts) 

712 if option.help: 

713 help_text = self.expand_default(option) 

714 

715 if self.help_position + len(help_text) + 1 <= self.width: 

716 lines = [ 

717 '', 

718 '%-*s %s' % (self.help_position, opts, help_text), 

719 ''] 

720 else: 

721 lines = [''] 

722 lines.append(opts) 

723 lines.append('') 

724 if option.help: 

725 help_lines = wrap(help_text, help_text_width) 

726 lines.extend(['%*s%s' % (help_text_position, '', line) 

727 for line in help_lines]) 

728 lines.append('') 

729 

730 return '\n'.join(lines) 

731 

732 def format_description(self, description): 

733 if not description: 

734 return '' 

735 

736 if self.current_indent == 0: 

737 lines = [] 

738 else: 

739 lines = [''] 

740 

741 lines.extend(wrap(description, self.width - self.current_indent)) 

742 if self.current_indent == 0: 

743 lines.append('\n') 

744 

745 return '\n'.join( 

746 ['%*s%s' % (self.current_indent, '', line) for line in lines]) 

747 

748 

749class ProgressBar: 

750 def __init__(self, label, n): 

751 from pyrocko.progress import progress 

752 self._context = progress.view() 

753 self._context.__enter__() 

754 self._task = progress.task(label, n) 

755 

756 def update(self, i): 

757 self._task.update(i) 

758 

759 def finish(self): 

760 self._task.done() 

761 if self._context: 

762 self._context.__exit__() 

763 self._context = None 

764 

765 

766def progressbar(label, maxval): 

767 if force_dummy_progressbar: 

768 return dummy_progressbar.ProgressBar(maxval=maxval).start() 

769 

770 return ProgressBar(label, maxval) 

771 

772 

773def progress_beg(label): 

774 ''' 

775 Notify user that an operation has started. 

776 

777 :param label: name of the operation 

778 

779 To be used in conjunction with :py:func:`progress_end`.
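
    A minimal usage sketch::

        progress_beg('processing files...')
        # ... do the work ...
        progress_end()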

780 ''' 

781 

782 sys.stderr.write(label) 

783 sys.stderr.flush() 

784 

785 

786def progress_end(label=''): 

787 ''' 

788 Notify user that an operation has ended. 

789 

790 :param label: name of the operation 

791 

792 To be used in conjunction with :py:func:`progress_beg`.

793 ''' 

794 

795 sys.stderr.write(' done. %s\n' % label) 

796 sys.stderr.flush() 

797 

798 

799class ArangeError(ValueError): 

800 ''' 

801 Raised by :py:func:`arange2` for inconsistent range specifications. 

802 ''' 

803 pass 

804 

805 

806def arange2(start, stop, step, dtype=float, epsilon=1e-6, error='raise'): 

807 ''' 

808 Return evenly spaced numbers over a specified interval. 

809 

810 Like :py:func:`numpy.arange` but returning floating point numbers by 

811 default and with defined behaviour when stepsize is inconsistent with 

812 interval bounds. It is considered inconsistent if the difference between 

813 the closest multiple of ``step`` and ``stop`` is larger than ``epsilon * 

814 step``. Inconsistencies are handled according to the ``error`` parameter. 

815 If it is set to ``'raise'`` an exception of type :py:exc:`ArangeError` is 

816 raised. If it is set to ``'round'``, ``'floor'``, or ``'ceil'``, ``stop`` 

817 is silently changed to the closest, the next smaller, or next larger 

818 multiple of ``step``, respectively. 
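
    A minimal usage sketch (illustrative values)::

        arange2(0., 1., 0.25)                   # -> [0., 0.25, 0.5, 0.75, 1.]
        arange2(0., 1.1, 0.25, error='ceil')    # stop is adjusted to 1.25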

819 ''' 

820 

821 assert error in ('raise', 'round', 'floor', 'ceil') 

822 

823 start = dtype(start) 

824 stop = dtype(stop) 

825 step = dtype(step) 

826 

827 rnd = {'floor': math.floor, 'ceil': math.ceil}.get(error, round) 

828 

829 n = int(rnd((stop - start) / step)) + 1 

830 stop_check = start + (n-1) * step 

831 

832 if error == 'raise' and abs(stop_check - stop) > step * epsilon: 

833 raise ArangeError( 

834 'inconsistent range specification: start=%g, stop=%g, step=%g' 

835 % (start, stop, step)) 

836 

837 x = num.arange(n, dtype=dtype) 

838 x *= step 

839 x += start 

840 return x 

841 

842 

843def polylinefit(x, y, n_or_xnodes): 

844 ''' 

845 Fit piece-wise linear function to data. 

846 

847 :param x,y: arrays with coordinates of data 

848 :param n_or_xnodes: int, number of segments or x coordinates of polyline 

849 

850 :returns: `(xnodes, ynodes, rms_error)` arrays with coordinates of 

851 polyline, root-mean-square error 
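
    A minimal usage sketch (illustrative data)::

        import numpy as num

        x = num.linspace(0., 10., 100)
        y = num.abs(x - 5.)
        xnodes, ynodes, rms_error = polylinefit(x, y, 2)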

852 ''' 

853 

854 x = num.asarray(x) 

855 y = num.asarray(y) 

856 

857 if isinstance(n_or_xnodes, int): 

858 n = n_or_xnodes 

859 xmin = x.min() 

860 xmax = x.max() 

861 xnodes = num.linspace(xmin, xmax, n+1) 

862 else: 

863 xnodes = num.asarray(n_or_xnodes) 

864 n = xnodes.size - 1 

865 

866 assert len(x) == len(y) 

867 assert n > 0 

868 

869 ndata = len(x) 

870 a = num.zeros((ndata+(n-1), n*2)) 

871 for i in range(n): 

872 xmin_block = xnodes[i] 

873 xmax_block = xnodes[i+1] 

874 if i == n-1: # don't lose the last point

875 indices = num.where( 

876 num.logical_and(xmin_block <= x, x <= xmax_block))[0] 

877 else: 

878 indices = num.where( 

879 num.logical_and(xmin_block <= x, x < xmax_block))[0] 

880 

881 a[indices, i*2] = x[indices] 

882 a[indices, i*2+1] = 1.0 

883 

884 w = float(ndata)*100. 

885 if i < n-1: 

886 a[ndata+i, i*2] = xmax_block*w 

887 a[ndata+i, i*2+1] = 1.0*w 

888 a[ndata+i, i*2+2] = -xmax_block*w 

889 a[ndata+i, i*2+3] = -1.0*w 

890 

891 d = num.concatenate((y, num.zeros(n-1))) 

892 model = num.linalg.lstsq(a, d, rcond=-1)[0].reshape((n, 2)) 

893 

894 ynodes = num.zeros(n+1) 

895 ynodes[:n] = model[:, 0]*xnodes[:n] + model[:, 1] 

896 ynodes[1:] += model[:, 0]*xnodes[1:] + model[:, 1] 

897 ynodes[1:n] *= 0.5 

898 

899 rms_error = num.sqrt(num.mean((num.interp(x, xnodes, ynodes) - y)**2)) 

900 

901 return xnodes, ynodes, rms_error 

902 

903 

904def plf_integrate_piecewise(x_edges, x, y): 

905 ''' 

906 Calculate definite integral of piece-wise linear function on intervals. 

907 

908 Use trapezoidal rule to calculate definite integral of a piece-wise linear 

909 function for a series of consecutive intervals. ``x_edges`` and ``x`` must 

910 be sorted. 

911 

912 :param x_edges: array with edges of the intervals 

913 :param x,y: arrays with coordinates of piece-wise linear function's 

914 control points 
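
    A minimal usage sketch (illustrative values)::

        import numpy as num

        x = num.array([0., 1., 2.])
        y = num.array([0., 1., 0.])
        plf_integrate_piecewise(num.array([0., 1., 2.]), x, y)
        # -> [0.5, 0.5]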

915 ''' 

916 

917 x_all = num.concatenate((x, x_edges)) 

918 ii = num.argsort(x_all) 

919 y_edges = num.interp(x_edges, x, y) 

920 y_all = num.concatenate((y, y_edges)) 

921 xs = x_all[ii] 

922 ys = y_all[ii] 

923 y_all[ii[1:]] = num.cumsum((xs[1:] - xs[:-1]) * 0.5 * (ys[1:] + ys[:-1])) 

924 return num.diff(y_all[-len(y_edges):]) 

925 

926 

927def diff_fd_1d_4o(dt, data): 

928 ''' 

929 Approximate first derivative of an array (fourth order, central FD).

930 

931 :param dt: sampling interval 

932 :param data: NumPy array with data samples 

933 

934 :returns: NumPy array with same shape as input 

935 

936 Interior points are approximated to fourth order, edge points to first 

937 order right- or left-sided respectively, points next to edge to second 

938 order central. 

939 ''' 

940 import scipy.signal 

941 

942 ddata = num.empty_like(data, dtype=float) 

943 

944 if data.size >= 5: 

945 ddata[2:-2] = scipy.signal.lfilter( 

946 [-1., +8., 0., -8., 1.], [1.], data)[4:] / (12.*dt) 

947 

948 if data.size >= 3: 

949 ddata[1] = (data[2] - data[0]) / (2. * dt) 

950 ddata[-2] = (data[-1] - data[-3]) / (2. * dt) 

951 

952 if data.size >= 2: 

953 ddata[0] = (data[1] - data[0]) / dt 

954 ddata[-1] = (data[-1] - data[-2]) / dt 

955 

956 if data.size == 1: 

957 ddata[0] = 0.0 

958 

959 return ddata 

960 

961 

962def diff_fd_1d_2o(dt, data): 

963 ''' 

964 Approximate first derivative of an array (second order, central FD). 

965 

966 :param dt: sampling interval 

967 :param data: NumPy array with data samples 

968 

969 :returns: NumPy array with same shape as input 

970 

971 Interior points are approximated to second order, edge points to first 

972 order right- or left-sided respectively. 

973 

974 Uses :py:func:`numpy.gradient`. 

975 ''' 

976 

977 return num.gradient(data, dt) 

978 

979 

980def diff_fd_2d_4o(dt, data): 

981 ''' 

982 Approximate second derivative of an array (fourth order, central FD).

983 

984 :param dt: sampling interval 

985 :param data: NumPy array with data samples 

986 

987 :returns: NumPy array with same shape as input 

988 

989 Interior points are approximated to fourth order, next-to-edge points to 

990 second order, edge points repeated. 

991 ''' 

992 import scipy.signal 

993 

994 ddata = num.empty_like(data, dtype=float) 

995 

996 if data.size >= 5: 

997 ddata[2:-2] = scipy.signal.lfilter( 

998 [-1., +16., -30., +16., -1.], [1.], data)[4:] / (12.*dt**2) 

999 

1000 if data.size >= 3: 

1001 ddata[:2] = (data[2] - 2.0 * data[1] + data[0]) / dt**2 

1002 ddata[-2:] = (data[-1] - 2.0 * data[-2] + data[-3]) / dt**2 

1003 

1004 if data.size < 3: 

1005 ddata[:] = 0.0 

1006 

1007 return ddata 

1008 

1009 

1010def diff_fd_2d_2o(dt, data): 

1011 ''' 

1012 Approximate second derivative of an array (second order, central FD). 

1013 

1014 :param dt: sampling interval 

1015 :param data: NumPy array with data samples 

1016 

1017 :returns: NumPy array with same shape as input 

1018 

1019 Interior points are approximated to second order, edge points repeated. 

1020 ''' 

1021 import scipy.signal 

1022 

1023 ddata = num.empty_like(data, dtype=float) 

1024 

1025 if data.size >= 3: 

1026 ddata[1:-1] = scipy.signal.lfilter( 

1027 [1., -2., 1.], [1.], data)[2:] / (dt**2) 

1028 

1029 ddata[0] = ddata[1] 

1030 ddata[-1] = ddata[-2] 

1031 

1032 if data.size < 3: 

1033 ddata[:] = 0.0 

1034 

1035 return ddata 

1036 

1037 

1038def diff_fd(n, order, dt, data): 

1039 ''' 

1040 Approximate 1st or 2nd derivative of an array. 

1041 

1042 :param n: 1 for first derivative, 2 for second 

1043 :param order: order of the approximation 2 and 4 are supported 

1044 :param dt: sampling interval 

1045 :param data: NumPy array with data samples 

1046 

1047 :returns: NumPy array with same shape as input 

1048 

1049 This is a frontend to the functions :py:func:`diff_fd_1d_2o`, 

1050 :py:func:`diff_fd_1d_4o`, :py:func:`diff_fd_2d_2o`, and 

1051 :py:func:`diff_fd_2d_4o`. 

1052 

1053 Raises :py:exc:`ValueError` for unsupported `n` or `order`. 
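
    A minimal usage sketch (illustrative data)::

        import numpy as num

        t = num.linspace(0., 10., 1001)
        x = num.sin(t)
        dxdt = diff_fd(1, 4, t[1] - t[0], x)   # 1st derivative, 4th order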

1054 ''' 

1055 

1056 funcs = { 

1057 1: {2: diff_fd_1d_2o, 4: diff_fd_1d_4o}, 

1058 2: {2: diff_fd_2d_2o, 4: diff_fd_2d_4o}} 

1059 

1060 try: 

1061 funcs_n = funcs[n] 

1062 except KeyError: 

1063 raise ValueError( 

1064 'pyrocko.util.diff_fd: ' 

1065 'Only 1st and 2nd derivatives are supported.')

1066 

1067 try: 

1068 func = funcs_n[order] 

1069 except KeyError: 

1070 raise ValueError( 

1071 'pyrocko.util.diff_fd: ' 

1072 'Order %i is not supported for %s derivative. Supported: %s' % ( 

1073 order, ['', '1st', '2nd'][n], 

1074 ', '.join('%i' % order for order in sorted(funcs_n.keys())))) 

1075 

1076 return func(dt, data) 

1077 

1078 

1079class GlobalVars(object): 

1080 reuse_store = dict() 

1081 decitab_nmax = 0 

1082 decitab = {} 

1083 decimate_fir_coeffs = {} 

1084 decimate_fir_remez_coeffs = {} 

1085 decimate_iir_coeffs = {} 

1086 re_frac = None 

1087 

1088 

1089def decimate_coeffs(q, n=None, ftype='iir'): 

1090 

1091 q = int(q) 

1092 

1093 if n is None: 

1094 if ftype == 'fir': 

1095 n = 30 

1096 elif ftype == 'fir-remez': 

1097 n = 45*q 

1098 else: 

1099 n = 8 

1100 

1101 if ftype == 'fir': 

1102 coeffs = GlobalVars.decimate_fir_coeffs 

1103 if (n, 1./q) not in coeffs: 

1104 coeffs[n, 1./q] = signal.firwin(n+1, .75/q, window='hamming') 

1105 

1106 b = coeffs[n, 1./q] 

1107 return b, [1.], n 

1108 

1109 elif ftype == 'fir-remez': 

1110 coeffs = GlobalVars.decimate_fir_remez_coeffs 

1111 if (n, 1./q) not in coeffs: 

1112 coeffs[n, 1./q] = signal.remez( 

1113 n+1, (0., .75/q, 1./q, 1.), 

1114 (1., 0.), fs=2, weight=(1, 50)) 

1115 b = coeffs[n, 1./q] 

1116 return b, [1.], n 

1117 

1118 else: 

1119 coeffs = GlobalVars.decimate_iir_coeffs 

1120 if (n, 0.05, 0.8/q) not in coeffs: 

1121 coeffs[n, 0.05, 0.8/q] = signal.cheby1(n, 0.05, 0.8/q) 

1122 

1123 b, a = coeffs[n, 0.05, 0.8/q] 

1124 return b, a, n 

1125 

1126 

1127def decimate(x, q, n=None, ftype='iir', zi=None, ioff=0): 

1128 ''' 

1129 Downsample the signal x by an integer factor q, using an order n filter 

1130 

1131 By default, an order 8 Chebyshev type I filter is used or a 30 point FIR 

1132 filter with hamming window if ftype is 'fir'. 

1133 

1134 :param x: the signal to be downsampled (1D :class:`numpy.ndarray`) 

1135 :param q: the downsampling factor 

1136 :param n: order of the filter (1 less than the length of the filter for a 

1137 `fir` filter) 

1138 :param ftype: type of the filter; can be `iir`, `fir` or `fir-remez` 

1139 

1140 :returns: the downsampled signal (1D :class:`numpy.ndarray`) 
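
    A minimal usage sketch (illustrative data)::

        import numpy as num

        x = num.random.random(1000)
        y = decimate(x, 5, ftype='fir')   # downsample by a factor of 5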

1141 

1142 ''' 

1143 

1144 b, a, n = decimate_coeffs(q, n, ftype) 

1145 

1146 if zi is None or zi is True: 

1147 zi_ = num.zeros(max(len(a), len(b))-1, dtype=float) 

1148 else: 

1149 zi_ = zi 

1150 

1151 y, zf = signal.lfilter(b, a, x, zi=zi_) 

1152 

1153 if zi is not None: 

1154 return y[n//2+ioff::q].copy(), zf 

1155 else: 

1156 return y[n//2+ioff::q].copy() 

1157 

1158 

1159class UnavailableDecimation(Exception): 

1160 ''' 

1161 Exception raised by :py:func:`decitab` for unavailable decimation factors. 

1162 ''' 

1163 

1164 pass 

1165 

1166 

1167def gcd(a, b, epsilon=1e-7): 

1168 ''' 

1169 Greatest common divisor. 

1170 ''' 

1171 

1172 while b > epsilon*a: 

1173 a, b = b, a % b 

1174 

1175 return a 

1176 

1177 

1178def lcm(a, b): 

1179 ''' 

1180 Least common multiple. 

1181 ''' 

1182 

1183 return a*b // gcd(a, b) 

1184 

1185 

1186def mk_decitab(nmax=100): 

1187 ''' 

1188 Make table with decimation sequences. 

1189 

1190 Decimation from one sampling rate to a lower one is achieved by a 

1191 successive application of :py:func:`decimate` with small integer 

1192 downsampling factors (because using large downsampling factors can make the 

1193 decimation unstable or slow). This function sets up a table with downsample 

1194 sequences for factors up to ``nmax``. 

1195 ''' 

1196 

1197 tab = GlobalVars.decitab 

1198 for i in range(1, 10): 

1199 for j in range(1, i+1): 

1200 for k in range(1, j+1): 

1201 for l_ in range(1, k+1): 

1202 for m in range(1, l_+1): 

1203 p = i*j*k*l_*m 

1204 if p > nmax: 

1205 break 

1206 if p not in tab: 

1207 tab[p] = (i, j, k, l_, m) 

1208 if i*j*k*l_ > nmax: 

1209 break 

1210 if i*j*k > nmax: 

1211 break 

1212 if i*j > nmax: 

1213 break 

1214 if i > nmax: 

1215 break 

1216 

1217 GlobalVars.decitab_nmax = nmax 

1218 

1219 

1220def zfmt(n): 

1221 return '%%0%ii' % (int(math.log10(n - 1)) + 1) 

1222 

1223 

1224def _year_to_time(year): 

1225 tt = (year, 1, 1, 0, 0, 0) 

1226 return to_time_float(calendar.timegm(tt)) 

1227 

1228 

1229def _working_year(year): 

1230 try: 

1231 tt = (year, 1, 1, 0, 0, 0) 

1232 t = calendar.timegm(tt) 

1233 tt2_ = time.gmtime(t) 

1234 tt2 = tuple(tt2_)[:6] 

1235 if tt != tt2: 

1236 return False 

1237 

1238 s = '%i-01-01 00:00:00' % year 

1239 s2 = time.strftime('%Y-%m-%d %H:%M:%S', tt2_) 

1240 if s != s2: 

1241 return False 

1242 

1243 t3 = str_to_time(s2, format='%Y-%m-%d %H:%M:%S') 

1244 s3 = time_to_str(t3, format='%Y-%m-%d %H:%M:%S') 

1245 if s3 != s2: 

1246 return False 

1247 

1248 except Exception: 

1249 return False 

1250 

1251 return True 

1252 

1253 

1254def working_system_time_range(year_min_lim=None, year_max_lim=None): 

1255 ''' 

1256 Check time range supported by the system's time conversion functions.

1257 

1258 Returns system time stamps of start of year of first/last fully supported 

1259 year span. If this is before 1900 or after 2100, return first/last century 

1260 which is fully supported. 

1261 

1262 :returns: ``(tmin, tmax, year_min, year_max)`` 

1263 ''' 

1264 

1265 year0 = 2000 

1266 year_min = year0 

1267 year_max = year0 

1268 

1269 itests = list(range(101)) 

1270 for i in range(19): 

1271 itests.append(200 + i*100) 

1272 

1273 for i in itests: 

1274 year = year0 - i 

1275 if year_min_lim is not None and year < year_min_lim: 

1276 year_min = year_min_lim 

1277 break 

1278 elif not _working_year(year): 

1279 break 

1280 else: 

1281 year_min = year 

1282 

1283 for i in itests: 

1284 year = year0 + i 

1285 if year_max_lim is not None and year > year_max_lim: 

1286 year_max = year_max_lim 

1287 break 

1288 elif not _working_year(year + 1): 

1289 break 

1290 else: 

1291 year_max = year 

1292 

1293 return ( 

1294 _year_to_time(year_min), 

1295 _year_to_time(year_max), 

1296 year_min, year_max) 

1297 

1298 

1299g_working_system_time_range = None 

1300 

1301 

1302def get_working_system_time_range(): 

1303 ''' 

1304 Caching variant of :py:func:`working_system_time_range`. 

1305 ''' 

1306 

1307 global g_working_system_time_range 

1308 if g_working_system_time_range is None: 

1309 g_working_system_time_range = working_system_time_range() 

1310 

1311 return g_working_system_time_range 

1312 

1313 

1314def is_working_time(t): 

1315 tmin, tmax, _, _ = get_working_system_time_range() 

1316 return tmin <= t <= tmax 

1317 

1318 

1319def julian_day_of_year(timestamp): 

1320 ''' 

1321 Get the day of the year (1 for the 1st of January) of ``timestamp``.

1322 

1323 :returns: day number as int 

1324 ''' 

1325 

1326 return time.gmtime(int(timestamp)).tm_yday 

1327 

1328 

1329def hour_start(timestamp): 

1330 ''' 

1331 Get beginning of hour for any point in time. 

1332 

1333 :param timestamp: time instant as system timestamp (in seconds) 

1334 

1335 :returns: instant of hour start as system timestamp 

1336 ''' 

1337 

1338 tt = time.gmtime(int(timestamp)) 

1339 tts = tt[0:4] + (0, 0) 

1340 return to_time_float(calendar.timegm(tts)) 

1341 

1342 

1343def day_start(timestamp): 

1344 ''' 

1345 Get beginning of day for any point in time. 

1346 

1347 :param timestamp: time instant as system timestamp (in seconds) 

1348 

1349 :returns: instant of day start as system timestamp 

1350 ''' 

1351 

1352 tt = time.gmtime(int(timestamp)) 

1353 tts = tt[0:3] + (0, 0, 0) 

1354 return to_time_float(calendar.timegm(tts)) 

1355 

1356 

1357def month_start(timestamp): 

1358 ''' 

1359 Get beginning of month for any point in time. 

1360 

1361 :param timestamp: time instant as system timestamp (in seconds) 

1362 

1363 :returns: instant of month start as system timestamp 

1364 ''' 

1365 

1366 tt = time.gmtime(int(timestamp)) 

1367 tts = tt[0:2] + (1, 0, 0, 0) 

1368 return to_time_float(calendar.timegm(tts)) 

1369 

1370 

1371def year_start(timestamp): 

1372 ''' 

1373 Get beginning of year for any point in time. 

1374 

1375 :param timestamp: time instant as system timestamp (in seconds) 

1376 

1377 :returns: instant of year start as system timestamp 

1378 ''' 

1379 

1380 tt = time.gmtime(int(timestamp)) 

1381 tts = tt[0:1] + (1, 1, 0, 0, 0) 

1382 return to_time_float(calendar.timegm(tts)) 

1383 

1384 

1385def iter_days(tmin, tmax): 

1386 ''' 

1387 Yields begin and end of days until given time span is covered. 

1388 

1389 :param tmin,tmax: input time span 

1390 

1391 :yields: tuples with (begin, end) of days as system timestamps 
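
    A minimal usage sketch (illustrative dates)::

        tmin = str_to_time('2023-01-01 12:00:00')
        tmax = str_to_time('2023-01-03 12:00:00')
        for tday_begin, tday_end in iter_days(tmin, tmax):
            print(time_to_str(tday_begin))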

1392 ''' 

1393 

1394 t = day_start(tmin) 

1395 while t < tmax: 

1396 tend = day_start(t + 26*60*60) 

1397 yield t, tend 

1398 t = tend 

1399 

1400 

1401def iter_months(tmin, tmax): 

1402 ''' 

1403 Yields begin and end of months until given time span is covered. 

1404 

1405 :param tmin,tmax: input time span 

1406 

1407 :yields: tuples with (begin, end) of months as system timestamps 

1408 ''' 

1409 

1410 t = month_start(tmin) 

1411 while t < tmax: 

1412 tend = month_start(t + 24*60*60*33) 

1413 yield t, tend 

1414 t = tend 

1415 

1416 

1417def iter_years(tmin, tmax): 

1418 ''' 

1419 Yields begin and end of years until given time span is covered. 

1420 

1421 :param tmin,tmax: input time span 

1422 

1423 :yields: tuples with (begin, end) of years as system timestamps 

1424 ''' 

1425 

1426 t = year_start(tmin) 

1427 while t < tmax: 

1428 tend = year_start(t + 24*60*60*369) 

1429 yield t, tend 

1430 t = tend 

1431 

1432 

1433def today(): 

1434 return day_start(time.time()) 

1435 

1436 

1437def tomorrow(): 

1438 return day_start(time.time() + 24*60*60) 

1439 

1440 

1441def decitab(n): 

1442 ''' 

1443 Get integer decimation sequence for given downsampling factor.

1444 

1445 :param n: target decimation factor 

1446 

1447 :returns: tuple with downsampling sequence 
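
    A minimal usage sketch::

        decitab(60)   # -> a tuple of small factors whose product is 60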

1448 ''' 

1449 

1450 if n > GlobalVars.decitab_nmax: 

1451 mk_decitab(n*2) 

1452 if n not in GlobalVars.decitab: 

1453 raise UnavailableDecimation('ratio = %g' % n) 

1454 return GlobalVars.decitab[n] 

1455 

1456 

1457def ctimegm(s, format='%Y-%m-%d %H:%M:%S'): 

1458 ''' 

1459 Convert string representing UTC time to system time. 

1460 

1461 :param s: string to be interpreted 

1462 :param format: format string passed to :py:func:`time.strptime` 

1463 

1464 :returns: system time stamp 

1465 

1466 Interprets string with format ``'%Y-%m-%d %H:%M:%S'``, using strptime.

1467 

1468 .. note:: 

1469 This function is to be replaced by :py:func:`str_to_time`. 

1470 ''' 

1471 

1472 return calendar.timegm(time.strptime(s, format)) 

1473 

1474 

1475def gmctime(t, format='%Y-%m-%d %H:%M:%S'): 

1476 ''' 

1477 Get string representation from system time, UTC. 

1478 

1479 Produces string with format ``'%Y-%m-%d %H:%M:%S'``, using strftime. 

1480 

1481 .. note:: 

1482 This function is to be replaced by :py:func:`time_to_str`.

1483 ''' 

1484 

1485 return time.strftime(format, time.gmtime(t)) 

1486 

1487 

1488def gmctime_v(t, format='%a, %d %b %Y %H:%M:%S'): 

1489 ''' 

1490 Get string representation from system time, UTC. Same as 

1491 :py:func:`gmctime` but with a more verbose default format. 

1492 

1493 .. note:: 

1494 This function is to be replaced by :py:func:`time_to_str`. 

1495 ''' 

1496 

1497 return time.strftime(format, time.gmtime(t)) 

1498 

1499 

1500def gmctime_fn(t, format='%Y-%m-%d_%H-%M-%S'): 

1501 ''' 

1502 Get string representation from system time, UTC. Same as 

1503 :py:func:`gmctime` but with a default usable in filenames. 

1504 

1505 .. note:: 

1506 This function is to be replaced by :py:func:`time_to_str`. 

1507 ''' 

1508 

1509 return time.strftime(format, time.gmtime(t)) 

1510 

1511 

1512class TimeStrError(Exception): 

1513 ''' 

1514 Raised for invalid time strings. 

1515 ''' 

1516 pass 

1517 

1518 

1519class FractionalSecondsMissing(TimeStrError): 

1520 ''' 

1521 Exception raised by :py:func:`str_to_time` when the given string lacks 

1522 fractional seconds. 

1523 ''' 

1524 

1525 pass 

1526 

1527 

1528class FractionalSecondsWrongNumberOfDigits(TimeStrError): 

1529 ''' 

1530 Exception raised by :py:func:`str_to_time` when the given string has an 

1531 incorrect number of digits in the fractional seconds part. 

1532 ''' 

1533 

1534 pass 

1535 

1536 

1537def _endswith_n(s, endings): 

1538 for ix, x in enumerate(endings): 

1539 if s.endswith(x): 

1540 return ix 

1541 return -1 

1542 

1543 

1544def str_to_time(s, format='%Y-%m-%d %H:%M:%S.OPTFRAC'): 

1545 ''' 

1546 Convert string representing UTC time to floating point system time. 

1547 

1548 :param s: string representing UTC time 

1549 :param format: time string format 

1550 :returns: system time stamp as floating point value 

1551 

1552 Uses the semantics of :py:func:`time.strptime` but allows for fractional 

1553 seconds. If the format ends with ``'.FRAC'``, anything after a dot is 

1554 interpreted as fractional seconds. If the format ends with ``'.OPTFRAC'``, 

1555 the fractional part, including the dot, is optional. As a consequence,

1556 neither the time string nor the format may contain any other dots. If the

1557 format ends with ``'.xFRAC'`` where x is 1, 2, or 3, it is ensured that

1558 exactly that number of digits is present in the fractional

1559 seconds.
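
    A minimal usage sketch (illustrative values)::

        str_to_time('2023-10-23 12:35:00')
        str_to_time('2023-10-23 12:35:00.125')
        str_to_time('2023-10-23', format='%Y-%m-%d')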

1560 ''' 

1561 

1562 time_float = get_time_float() 

1563 

1564 if util_ext is not None: 

1565 try: 

1566 t, tfrac = util_ext.stt(s, format) 

1567 except util_ext.UtilExtError as e: 

1568 raise TimeStrError( 

1569 '%s, string=%s, format=%s' % (str(e), s, format)) 

1570 

1571 return time_float(t)+tfrac 

1572 

1573 fracsec = 0. 

1574 fixed_endings = '.FRAC', '.1FRAC', '.2FRAC', '.3FRAC' 

1575 

1576 iend = _endswith_n(format, fixed_endings) 

1577 if iend != -1: 

1578 dotpos = s.rfind('.') 

1579 if dotpos == -1: 

1580 raise FractionalSecondsMissing( 

1581 'string=%s, format=%s' % (s, format)) 

1582 

1583 if iend > 0 and iend != (len(s)-dotpos-1): 

1584 raise FractionalSecondsWrongNumberOfDigits( 

1585 'string=%s, format=%s' % (s, format)) 

1586 

1587 format = format[:-len(fixed_endings[iend])] 

1588 fracsec = float(s[dotpos:]) 

1589 s = s[:dotpos] 

1590 

1591 elif format.endswith('.OPTFRAC'): 

1592 dotpos = s.rfind('.') 

1593 format = format[:-8] 

1594 if dotpos != -1 and len(s[dotpos:]) > 1: 

1595 fracsec = float(s[dotpos:]) 

1596 

1597 if dotpos != -1: 

1598 s = s[:dotpos] 

1599 

1600 try: 

1601 return time_float(calendar.timegm(time.strptime(s, format))) \ 

1602 + fracsec 

1603 except ValueError as e: 

1604 raise TimeStrError('%s, string=%s, format=%s' % (str(e), s, format)) 

1605 

1606 

1607stt = str_to_time 

1608 

1609 

1610def str_to_time_fillup(s): 

1611 ''' 

1612 Default :py:func:`str_to_time` with filling in of missing values. 

1613 

1614 Allows e.g. `'2010-01-01 00:00'`, `'2010-01-01 00'`, ..., or `'2010'` to be

1615 given in place of the full `'2010-01-01 00:00:00'`.
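
    A minimal usage sketch::

        str_to_time_fillup('2010-05')   # same as str_to_time('2010-05-01 00:00:00')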

1616 ''' 

1617 

1618 if s == 'now': 

1619 return time.time() 

1620 

1621 if len(s) in (4, 7, 10, 13, 16): 

1622 s += '0000-01-01 00:00:00'[len(s):] 

1623 

1624 return str_to_time(s) 

1625 

1626 

1627def time_to_str(t, format='%Y-%m-%d %H:%M:%S.3FRAC'): 

1628 ''' 

1629 Get string representation for floating point system time. 

1630 

1631 :param t: floating point system time 

1632 :param format: time string format 

1633 :returns: string representing UTC time 

1634 

1635 Uses the semantics of :py:func:`time.strftime` but additionally allows for 

1636 fractional seconds. If ``format`` contains ``'.xFRAC'``, where ``x`` is a 

1637 digit between 1 and 9, this is replaced with the fractional part of ``t`` 

1638 with ``x`` digits precision. 
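
    A minimal usage sketch::

        time_to_str(0.0)                       # -> '1970-01-01 00:00:00.000'
        time_to_str(0.0, format='%Y-%m-%d')    # -> '1970-01-01'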

1639 ''' 

1640 

1641 if pyrocko.grumpy > 0: 

1642 check_time_class(t, 'warn' if pyrocko.grumpy == 1 else 'raise') 

1643 

1644 if isinstance(format, int): 

1645 if format > 0: 

1646 format = '%Y-%m-%d %H:%M:%S.' + '%iFRAC' % format 

1647 else: 

1648 format = '%Y-%m-%d %H:%M:%S' 

1649 

1650 if util_ext is not None: 

1651 t0 = num.floor(t) 

1652 try: 

1653 return util_ext.tts(int(t0), float(t - t0), format) 

1654 except util_ext.UtilExtError as e: 

1655 raise TimeStrError( 

1656 '%s, timestamp=%f, format=%s' % (str(e), t, format)) 

1657 

1658 if not GlobalVars.re_frac: 

1659 GlobalVars.re_frac = re.compile(r'\.[1-9]FRAC') 

1660 GlobalVars.frac_formats = dict( 

1661 [('.%sFRAC' % x, '%.'+x+'f') for x in '123456789']) 

1662 

1663 ts = float(num.floor(t)) 

1664 tfrac = t-ts 

1665 

1666 m = GlobalVars.re_frac.search(format) 

1667 if m: 

1668 sfrac = (GlobalVars.frac_formats[m.group(0)] % tfrac) 

1669 if sfrac[0] == '1': 

1670 ts += 1. 

1671 

1672 format, nsub = GlobalVars.re_frac.subn(sfrac[1:], format, 1) 

1673 

1674 return time.strftime(format, time.gmtime(ts)) 

1675 

1676 

1677tts = time_to_str 

1678_abbr_weekday = 'Mon Tue Wed Thu Fri Sat Sun'.split() 

1679_abbr_month = 'Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec'.split() 

1680 

1681 

1682def mystrftime(fmt=None, tt=None, milliseconds=0): 

1683 # Needed by Snuffler for the time axis. In other cases `time_to_str` 

1684 # should be used. 

1685 

1686 if fmt is None: 

1687 fmt = '%Y-%m-%d %H:%M:%S .%r' 

1688 

1689 # Get these two locale independent, needed by Snuffler. 

1690 # Setting the locale seems to have no effect. 

1691 fmt = fmt.replace('%a', _abbr_weekday[tt.tm_wday]) 

1692 fmt = fmt.replace('%b', _abbr_month[tt.tm_mon-1]) 

1693 

1694 fmt = fmt.replace('%r', '%03i' % int(round(milliseconds))) 

1695 fmt = fmt.replace('%u', '%06i' % int(round(milliseconds*1000))) 

1696 fmt = fmt.replace('%n', '%09i' % int(round(milliseconds*1000000))) 

1697 return time.strftime(fmt, tt) 

1698 

1699 

1700def gmtime_x(timestamp): 

1701 etimestamp = float(num.floor(timestamp)) 

1702 tt = time.gmtime(etimestamp) 

1703 ms = (timestamp-etimestamp)*1000 

1704 return tt, ms 

1705 

1706 

1707def plural_s(n): 

1708 if not isinstance(n, int): 

1709 n = len(n) 

1710 

1711 return 's' if n != 1 else '' 

1712 

1713 

1714def ensuredirs(dst): 

1715 ''' 

1716 Create all intermediate path components for a target path. 

1717 

1718 :param dst: target path 

1719 

1720 The leaf part of the target path is not created (use :py:func:`ensuredir` 

1721 if the target path is a directory to be created).
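
    A minimal usage sketch (illustrative path)::

        ensuredirs('a/b/c/data.txt')   # creates directories a/b/c as needed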

1722 ''' 

1723 

1724 d, x = os.path.split(dst.rstrip(os.sep)) 

1725 dirs = [] 

1726 while d and not os.path.exists(d): 

1727 dirs.append(d) 

1728 d, x = os.path.split(d) 

1729 

1730 dirs.reverse() 

1731 

1732 for d in dirs: 

1733 try: 

1734 os.mkdir(d) 

1735 except OSError as e: 

1736 if not e.errno == errno.EEXIST: 

1737 raise 

1738 

1739 

1740def ensuredir(dst): 

1741 ''' 

1742 Create directory and all intermediate path components to it as needed. 

1743 

1744 :param dst: directory name 

1745 

1746 Nothing is done if the given target already exists. 

1747 ''' 

1748 

1749 if os.path.exists(dst): 

1750 return 

1751 

1752 dst = dst.rstrip(os.sep)

1753 

1754 ensuredirs(dst) 

1755 try: 

1756 os.mkdir(dst) 

1757 except OSError as e: 

1758 if not e.errno == errno.EEXIST: 

1759 raise 

1760 

1761 

1762def reuse(x): 

1763 ''' 

1764 Get unique instance of an object. 

1765 

1766 :param x: hashable object 

1767 :returns: reference to x or an equivalent object 

1768 

1769 Cache object ``x`` in a global dict for reuse, or if x already 

1770 is in that dict, return a reference to it. 
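
    A minimal usage sketch::

        a = reuse((1, 2, 3))
        b = reuse((1, 2, 3))
        assert a is b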

1771 ''' 

1772 

1773 grs = GlobalVars.reuse_store 

1774 if x not in grs: 

1775 grs[x] = x 

1776 return grs[x] 

1777 

1778 

1779def deuse(x): 

1780 grs = GlobalVars.reuse_store 

1781 if x in grs: 

1782 del grs[x] 

1783 

1784 

1785class Anon(object): 

1786 ''' 

1787 Dict-to-object utility. 

1788 

1789 Any given arguments are stored as attributes. 

1790 

1791 Example:: 

1792 

1793 a = Anon(x=1, y=2) 

1794 print a.x, a.y 

1795 ''' 

1796 

1797 def __init__(self, **dict): 

1798 for k in dict: 

1799 self.__dict__[k] = dict[k] 

1800 

1801 

1802def iter_select_files( 

1803 paths, 

1804 include=None, 

1805 exclude=None, 

1806 selector=None, 

1807 show_progress=True, 

1808 pass_through=None): 

1809 

1810 ''' 

1811 Recursively select files (generator variant). 

1812 

1813 See :py:func:`select_files`. 

1814 ''' 

1815 

1816 if show_progress: 

1817 progress_beg('selecting files...') 

1818 

1819 ngood = 0 

1820 check_include = None 

1821 if include is not None: 

1822 rinclude = re.compile(include) 

1823 

1824 def check_include(path): 

1825 m = rinclude.search(path) 

1826 if not m: 

1827 return False 

1828 

1829 if selector is None: 

1830 return True 

1831 

1832 infos = Anon(**m.groupdict()) 

1833 return selector(infos) 

1834 

1835 check_exclude = None 

1836 if exclude is not None: 

1837 rexclude = re.compile(exclude) 

1838 

1839 def check_exclude(path): 

1840 return not bool(rexclude.search(path)) 

1841 

1842 if check_include and check_exclude: 

1843 

1844 def check(path): 

1845 return check_include(path) and check_exclude(path) 

1846 

1847 elif check_include: 

1848 check = check_include 

1849 

1850 elif check_exclude: 

1851 check = check_exclude 

1852 

1853 else: 

1854 check = None 

1855 

1856 if isinstance(paths, str): 

1857 paths = [paths] 

1858 

1859 for path in paths: 

1860 if pass_through and pass_through(path): 

1861 if check is None or check(path): 

1862 yield path 

1863 

1864 elif os.path.isdir(path): 

1865 for (dirpath, dirnames, filenames) in os.walk(path): 

1866 dirnames.sort() 

1867 filenames.sort() 

1868 for filename in filenames: 

1869 path = op.join(dirpath, filename) 

1870 if check is None or check(path): 

1871 yield os.path.abspath(path) 

1872 ngood += 1 

1873 else: 

1874 if check is None or check(path): 

1875 yield os.path.abspath(path) 

1876 ngood += 1 

1877 

1878 if show_progress: 

1879 progress_end('%i file%s selected.' % (ngood, plural_s(ngood))) 

1880 

1881 

1882def select_files( 

1883 paths, include=None, exclude=None, selector=None, show_progress=True, 

1884 regex=None): 

1885 

1886 ''' 

1887 Recursively select files. 

1888 

1889 :param paths: entry path names 

1890 :param include: pattern for conditional inclusion 

1891 :param exclude: pattern for conditional exclusion 

1892 :param selector: callback for conditional inclusion 

1893 :param show_progress: if True, indicate start and stop of processing 

1894 :param regex: alias for ``include`` (backwards compatibility) 

1895 :returns: list of path names 

1896 

1897 Recursively finds all files under given entry points ``paths``. If 

1898 parameter ``include`` is a regular expression, only files with matching 

1899 path names are included. If additionally parameter ``selector`` is given a 

1900 callback function, only files for which the callback returns ``True`` are 

1901 included. The callback should take a single argument. The callback is 

1902 called with a single argument, an object, having as attributes, any named 

1903 groups given in ``include``. 

1904 

1905 Examples 

1906 

1907 To find all files ending in ``'.mseed'`` or ``'.msd'``:: 

1908 

1909 select_files(paths, 

1910 include=r'\\.(mseed|msd)$') 

1911 

1912 To find all files ending with ``'$Year.$DayOfYear'``, having set 2009 for 

1913 the year:: 

1914 

1915 select_files(paths, 

1916 include=r'(?P<year>\\d\\d\\d\\d)\\.(?P<doy>\\d\\d\\d)$', 

1917 selector=(lambda x: int(x.year) == 2009)) 

1918 ''' 

1919 if regex is not None: 

1920 assert include is None 

1921 include = regex 

1922 

1923 return list(iter_select_files( 

1924 paths, include, exclude, selector, show_progress)) 

1925 

1926 

1927def base36encode(number, alphabet='0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'): 

1928 ''' 

1929 Convert positive integer to a base36 string. 
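
    A minimal usage sketch::

        base36encode(35)   # -> 'Z'
        base36encode(36)   # -> '10'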

1930 ''' 

1931 

1932 if not isinstance(number, (int, long)): 

1933 raise TypeError('number must be an integer') 

1934 if number < 0: 

1935 raise ValueError('number must be positive') 

1936 

1937 # Special case for small numbers 

1938 if number < 36: 

1939 return alphabet[number] 

1940 

1941 base36 = '' 

1942 while number != 0: 

1943 number, i = divmod(number, 36) 

1944 base36 = alphabet[i] + base36 

1945 

1946 return base36 

1947 

1948 

1949def base36decode(number): 

1950 ''' 

1951 Decode base36 encoded positive integer.

1952 ''' 

1953 

1954 return int(number, 36) 

1955 

1956 

1957class UnpackError(Exception): 

1958 ''' 

1959 Exception raised when :py:func:`unpack_fixed` encounters an error. 

1960 ''' 

1961 

1962 pass 

1963 

1964 

1965ruler = ''.join(['%-10i' % i for i in range(8)]) \ 

1966 + '\n' + '0123456789' * 8 + '\n' 

1967 

1968 

1969def unpack_fixed(format, line, *callargs): 

1970 ''' 

1971 Unpack fixed format string, as produced by many fortran codes. 

1972 

1973 :param format: format specification 

1974 :param line: string to be processed 

1975 :param callargs: callbacks for callback fields in the format 

1976 

1977 The format is described by a string of comma-separated fields. Each field 

1978 is defined by a character for the field type followed by the field width. A 

1979 question mark may be appended to the field description to make the field

1980 optional (the corresponding part of the data string may then be blank,

1981 in which case ``None`` is returned).

1982 

1983 The following field types are available: 

1984 

1985 ==== ================================================================ 

1986 Type Description 

1987 ==== ================================================================ 

1988 A string (full field width is extracted) 

1989 a string (whitespace at the beginning and the end is removed) 

1990 i integer value 

1991 f floating point value 

1992 @ special type, a callback must be given for the conversion 

1993 x special field type to skip parts of the string 

1994 ==== ================================================================ 

1995 ''' 

1996 

1997 ipos = 0 

1998 values = [] 

1999 icall = 0 

2000 for form in format.split(','): 

2001 form = form.strip() 

2002 optional = form[-1] == '?' 

2003 form = form.rstrip('?') 

2004 typ = form[0] 

2005 ln = int(form[1:]) 

2006 s = line[ipos:ipos+ln] 

2007 cast = { 

2008 'x': None, 

2009 'A': str, 

2010 'a': lambda x: x.strip(), 

2011 'i': int, 

2012 'f': float, 

2013 '@': 'extra'}[typ] 

2014 

2015 if cast == 'extra': 

2016 cast = callargs[icall] 

2017 icall += 1 

2018 

2019 if cast is not None: 

2020 if optional and s.strip() == '': 

2021 values.append(None) 

2022 else: 

2023 try: 

2024 values.append(cast(s)) 

2025 except Exception: 

2026 mark = [' '] * 80 

2027 mark[ipos:ipos+ln] = ['^'] * ln 

2028 mark = ''.join(mark) 

2029 raise UnpackError( 

2030 'Invalid cast to type "%s" at position [%i:%i] of ' 

2031 'line: \n%s%s\n%s' % ( 

2032 typ, ipos, ipos+ln, ruler, line.rstrip(), mark)) 

2033 

2034 ipos += ln 

2035 

2036 return values 

2037 
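# Hypothetical usage sketch (editor's note, not part of pyrocko.util): parse a
# fixed-width record with a 4-character station code, a 5-character integer
# year, one skipped character and a 6-character float magnitude.
#
#     unpack_fixed('a4,i5,x1,f6', 'HAM3 2009 3.20')
#     # -> ['HAM3', 2009, 3.2]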

2038 

2039_pattern_cache = {} 

2040 

2041 

2042def _nslc_pattern(pattern): 

2043 if pattern not in _pattern_cache: 

2044 rpattern = re.compile(fnmatch.translate(pattern), re.I) 

2045 _pattern_cache[pattern] = rpattern 

2046 else: 

2047 rpattern = _pattern_cache[pattern] 

2048 

2049 return rpattern 

2050 

2051 

2052def match_nslc(patterns, nslc): 

2053 ''' 

2054 Match network-station-location-channel code against pattern or list of 

2055 patterns. 

2056 

2057 :param patterns: pattern or list of patterns 

2058 :param nslc: tuple with (network, station, location, channel) as strings 

2059 

2060 :returns: ``True`` if any of the given patterns matches the code, 

2061 ``False`` otherwise. 

2062 

2063 The patterns may contain shell-style wildcards: \\*, ?, [seq], [!seq]. 

2064 

2065 Example:: 

2066 

2067 match_nslc('*.HAM3.*.BH?', ('GR', 'HAM3', '', 'BHZ')) # -> True 

2068 ''' 

2069 

2070 if isinstance(patterns, str): 

2071 patterns = [patterns] 

2072 

2073 if not isinstance(nslc, str): 

2074 s = '.'.join(nslc) 

2075 else: 

2076 s = nslc 

2077 

2078 for pattern in patterns: 

2079 if _nslc_pattern(pattern).match(s): 

2080 return True 

2081 

2082 return False 

2083 

2084 

2085def match_nslcs(patterns, nslcs): 

2086 ''' 

2087 Get network-station-location-channel codes that match given pattern or any 

2088 of several given patterns. 

2089 

2090 :param patterns: pattern or list of patterns 

2091 :param nslcs: list of (network, station, location, channel) tuples 

2092 

2093 See also :py:func:`match_nslc` 

2094 ''' 

2095 

2096 matching = [] 

2097 for nslc in nslcs: 

2098 if match_nslc(patterns, nslc): 

2099 matching.append(nslc) 

2100 

2101 return matching 

2102 
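# Hypothetical usage sketch (editor's note): filter a list of NSLC tuples with
# a wildcard pattern (codes below are made up for illustration).
#
#     match_nslcs('*.HAM3.*.BH?', [
#         ('GR', 'HAM3', '', 'BHZ'),
#         ('GR', 'WET', '', 'BHZ')])
#     # -> [('GR', 'HAM3', '', 'BHZ')]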

2103 

2104class Timeout(Exception): 

2105 pass 

2106 

2107 

2108def create_lockfile(fn, timeout=None, timewarn=10.): 

2109 t0 = time.time() 

2110 

2111 while True: 

2112 try: 

2113 f = os.open(fn, os.O_CREAT | os.O_WRONLY | os.O_EXCL) 

2114 os.close(f) 

2115 return 

2116 

2117 except OSError as e: 

2118 if e.errno in (errno.EEXIST, 13): # 13 (EACCES) occurs on Windows 

2119 pass # retry 

2120 else: 

2121 raise 

2122 

2123 tnow = time.time() 

2124 

2125 if timeout is not None and tnow - t0 > timeout: 

2126 raise Timeout( 

2127 'Timeout (%gs) occurred while waiting to get exclusive ' 

2128 'access to: %s' % (timeout, fn)) 

2129 

2130 if timewarn is not None and tnow - t0 > timewarn: 

2131 logger.warning( 

2132 'Waiting since %gs to get exclusive access to: %s' % ( 

2133 timewarn, fn)) 

2134 

2135 timewarn *= 2 

2136 

2137 time.sleep(0.01) 

2138 

2139 

2140def delete_lockfile(fn): 

2141 os.unlink(fn) 

2142 

2143 

2144class Lockfile(Exception): 

2145 

2146 def __init__(self, path, timeout=5, timewarn=10.): 

2147 self._path = path 

2148 self._timeout = timeout 

2149 self._timewarn = timewarn 

2150 

2151 def __enter__(self): 

2152 create_lockfile( 

2153 self._path, timeout=self._timeout, timewarn=self._timewarn) 

2154 return None 

2155 

2156 def __exit__(self, type, value, traceback): 

2157 delete_lockfile(self._path) 

2158 
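# Hypothetical usage sketch (editor's note): Lockfile is meant to be used as a
# context manager, serializing access to a shared resource via a lock file
# (the path below is made up for illustration).
#
#     with Lockfile('/tmp/shared-resource.lock'):
#         pass  # only one process at a time executes this block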

2159 

2160class SoleError(Exception): 

2161 ''' 

2162 Exception raised by objects of type :py:class:`Sole` when a concurrent 

2163 instance is running. 

2164 ''' 

2165 

2166 pass 

2167 

2168 

2169class Sole(object): 

2170 ''' 

2171 Use POSIX advisory file locking to ensure that only a single instance of a 

2172 program is running. 

2173 

2174 :param pid_path: path to lockfile to be used 

2175 

2176 Usage:: 

2177 

2178 from pyrocko.util import Sole, SoleError, setup_logging 

2179 import os 

2180 

2181 setup_logging('my_program') 

2182 

2183 pid_path = os.path.join(os.environ['HOME'], '.my_program_lock') 

2184 try: 

2185 sole = Sole(pid_path) 

2186 

2187 except SoleError as e: 

2188 logger.fatal(str(e)) 

2189 sys.exit(1) 

2190 ''' 

2191 

2192 def __init__(self, pid_path): 

2193 self._pid_path = pid_path 

2194 self._other_running = False 

2195 ensuredirs(self._pid_path) 

2196 self._lockfile = None 

2197 self._os = os 

2198 

2199 if not fcntl: 

2200 raise SoleError( 

2201 'Python standard library module "fcntl" not available on ' 

2202 'this platform.') 

2203 

2204 self._fcntl = fcntl 

2205 

2206 try: 

2207 self._lockfile = os.open(self._pid_path, os.O_CREAT | os.O_WRONLY) 

2208 except Exception: 

2209 raise SoleError( 

2210 'Cannot open lockfile (path = %s)' % self._pid_path) 

2211 

2212 try: 

2213 fcntl.lockf(self._lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB) 

2214 

2215 except IOError: 

2216 self._other_running = True 

2217 try: 

2218 f = open(self._pid_path, 'r') 

2219 pid = f.read().strip() 

2220 f.close() 

2221 except Exception: 

2222 pid = '?' 

2223 

2224 raise SoleError('Other instance is running (pid = %s)' % pid) 

2225 

2226 try: 

2227 os.ftruncate(self._lockfile, 0) 

2228 os.write(self._lockfile, b'%i\n' % os.getpid()) 

2229 os.fsync(self._lockfile) 

2230 

2231 except Exception: 

2232 # the pid is only stored for user information, so this is allowed 

2233 # to fail 

2234 pass 

2235 

2236 def __del__(self): 

2237 if not self._other_running: 

2238 if self._lockfile is not None: 

2239 self._fcntl.lockf(self._lockfile, self._fcntl.LOCK_UN) 

2240 self._os.close(self._lockfile) 

2241 try: 

2242 self._os.unlink(self._pid_path) 

2243 except Exception: 

2244 pass 

2245 

2246 

2247re_escapequotes = re.compile(r"(['\\])") 

2248 

2249 

2250def escapequotes(s): 

2251 return re_escapequotes.sub(r'\\\1', s) 

2252 

2253 

2254class TableWriter(object): 

2255 ''' 

2256 Write table of space separated values to a file. 

2257 

2258 :param f: file-like object 

2259 

2260 Strings containing spaces are quoted on output. 

2261 ''' 

2262 

2263 def __init__(self, f): 

2264 self._f = f 

2265 

2266 def writerow(self, row, minfieldwidths=None): 

2267 

2268 ''' 

2269 Write one row of values to underlying file. 

2270 

2271 :param row: iterable of values 

2272 :param minfieldwidths: minimum field widths for the values 

2273 

2274 Each value in ``row`` is converted to a string and optionally padded 

2275 with blanks. The resulting strings are output separated by blanks. 

2276 String values containing whitespace or single quotes are quoted with 

2277 single quotes, and any internal single quotes and backslashes are 

2278 backslash-escaped. 

2279 ''' 

2280 

2281 out = [] 

2282 

2283 for i, x in enumerate(row): 

2284 w = 0 

2285 if minfieldwidths and i < len(minfieldwidths): 

2286 w = minfieldwidths[i] 

2287 

2288 if isinstance(x, str): 

2289 if re.search(r"\s|'", x): 

2290 x = "'%s'" % escapequotes(x) 

2291 

2292 x = x.ljust(w) 

2293 else: 

2294 x = str(x).rjust(w) 

2295 

2296 out.append(x) 

2297 

2298 self._f.write(' '.join(out).rstrip() + '\n') 

2299 

2300 

2301class TableReader(object): 

2302 

2303 ''' 

2304 Read table of space separated values from a file. 

2305 

2306 :param f: file-like object 

2307 

2308 Lines are tokenized with :py:func:`qsplit`, so quoted strings are 

2309 handled correctly. 

2310 ''' 

2311 

2312 def __init__(self, f): 

2313 self._f = f 

2314 self.eof = False 

2315 

2316 def readrow(self): 

2317 ''' 

2318 Read one row from the underlying file and tokenize it with :py:func:`qsplit`. 

2319 

2320 :returns: tokenized line as a list of strings. 

2321 ''' 

2322 

2323 line = self._f.readline() 

2324 if not line: 

2325 self.eof = True 

2326 return [] 

2327 line = line.strip() 

2328 if line.startswith('#'): 

2329 return [] 

2330 

2331 return qsplit(line) 

2332 
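# Hypothetical round-trip sketch (editor's note): write one row with
# TableWriter and read it back with TableReader via an in-memory buffer.
#
#     import io
#     buf = io.StringIO()
#     TableWriter(buf).writerow(['HAM3', "Sparrow's Island", 55.3])
#     buf.seek(0)
#     TableReader(buf).readrow()
#     # -> ['HAM3', "Sparrow's Island", '55.3']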

2333 

2334def gform(number, significant_digits=3): 

2335 ''' 

2336 Pretty print floating point numbers. 

2337 

2338 Align floating point numbers at the decimal dot. 

2339 

2340 :: 

2341 

2342 | -d.dde+xxx| 

2343 | -d.dde+xx | 

2344 |-ddd. | 

2345 | -dd.d | 

2346 | -d.dd | 

2347 | -0.ddd | 

2348 | -0.0ddd | 

2349 | -0.00ddd | 

2350 | -d.dde-xx | 

2351 | -d.dde-xxx| 

2352 | nan| 

2353 

2354 

2355 The formatted string has length ``significant_digits * 2 + 6``. 

2356 ''' 

2357 

2358 no_exp_range = (pow(10., -1), 

2359 pow(10., significant_digits)) 

2360 width = significant_digits+significant_digits-1+1+1+5 

2361 

2362 if (no_exp_range[0] <= abs(number) < no_exp_range[1]) or number == 0.: 

2363 s = ('%#.*g' % (significant_digits, number)).rstrip('0') 

2364 else: 

2365 s = '%.*E' % (significant_digits-1, number) 

2366 s = (' '*(-s.find('.')+(significant_digits+1))+s).ljust(width) 

2367 if s.strip().lower() == 'nan': 

2368 s = 'nan'.rjust(width) 

2369 return s 

2370 

2371 

2372def human_bytesize(value): 

2373 

2374 exts = 'Bytes kB MB GB TB PB EB ZB YB'.split() 

2375 

2376 if value == 1: 

2377 return '1 Byte' 

2378 

2379 for i, ext in enumerate(exts): 

2380 x = float(value) / 1000**i 

2381 if round(x) < 10. and not value < 1000: 

2382 return '%.1f %s' % (x, ext) 

2383 if round(x) < 1000.: 

2384 return '%.0f %s' % (x, ext) 

2385 

2386 return '%i Bytes' % value 

2387 

2388 

2389re_compatibility = re.compile( 

2390 r'!pyrocko\.(trace|gf\.(meta|seismosizer)|fomosto\.' + 

2391 r'(dummy|poel|qseis|qssp))\.' 

2392) 

2393 

2394 

2395def pf_is_old(fn): 

2396 oldstyle = False 

2397 with open(fn, 'r') as f: 

2398 for line in f: 

2399 if re_compatibility.search(line): 

2400 oldstyle = True 

2401 

2402 return oldstyle 

2403 

2404 

2405def pf_upgrade(fn): 

2406 need = pf_is_old(fn) 

2407 if need: 

2408 fn_temp = fn + '.temp' 

2409 

2410 with open(fn, 'r') as fin: 

2411 with open(fn_temp, 'w') as fout: 

2412 for line in fin: 

2413 line = re_compatibility.sub('!pf.', line) 

2414 fout.write(line) 

2415 

2416 os.rename(fn_temp, fn) 

2417 

2418 return need 

2419 

2420 

2421def read_leap_seconds(tzfile='/usr/share/zoneinfo/right/UTC'): 

2422 ''' 

2423 Extract leap second information from tzdata. 

2424 

2425 Based on example at http://stackoverflow.com/questions/19332902/\ 

2426 extract-historic-leap-seconds-from-tzdata 

2427 

2428 See also 'man 5 tzfile'. 

2429 ''' 

2430 

2431 from struct import unpack, calcsize 

2432 out = [] 

2433 with open(tzfile, 'rb') as f: 

2434 # read header 

2435 fmt = '>4s c 15x 6l' 

2436 (magic, format, ttisgmtcnt, ttisstdcnt, leapcnt, timecnt, 

2437 typecnt, charcnt) = unpack(fmt, f.read(calcsize(fmt))) 

2438 assert magic == 'TZif'.encode('US-ASCII'), 'Not a timezone file' 

2439 

2440 # skip over some uninteresting data 

2441 fmt = '>%(timecnt)dl %(timecnt)dB %(ttinfo)s %(charcnt)ds' % dict( 

2442 timecnt=timecnt, ttinfo='lBB'*typecnt, charcnt=charcnt) 

2443 f.read(calcsize(fmt)) 

2444 

2445 # read leap-seconds 

2446 fmt = '>2l' 

2447 for i in range(leapcnt): 

2448 tleap, nleap = unpack(fmt, f.read(calcsize(fmt))) 

2449 out.append((tleap-nleap+1, nleap)) 

2450 

2451 return out 

2452 

2453 

2454class LeapSecondsError(Exception): 

2455 pass 

2456 

2457 

2458class LeapSecondsOutdated(LeapSecondsError): 

2459 pass 

2460 

2461 

2462class InvalidLeapSecondsFile(LeapSecondsOutdated): 

2463 pass 

2464 

2465 

2466def parse_leap_seconds_list(fn): 

2467 data = [] 

2468 texpires = None 

2469 try: 

2470 t0 = int(round(str_to_time('1900-01-01 00:00:00'))) 

2471 except TimeStrError: 

2472 t0 = int(round(str_to_time('1970-01-01 00:00:00'))) - 2208988800 

2473 

2474 tnow = int(round(time.time())) 

2475 

2476 if not op.exists(fn): 

2477 raise LeapSecondsOutdated('no leap seconds file found') 

2478 

2479 try: 

2480 with open(fn, 'rb') as f: 

2481 for line in f: 

2482 if line.strip().startswith(b'<!DOCTYPE'): 

2483 raise InvalidLeapSecondsFile('invalid leap seconds file') 

2484 

2485 if line.startswith(b'#@'): 

2486 texpires = int(line.split()[1]) + t0 

2487 elif line.startswith(b'#') or len(line) < 5: 

2488 pass 

2489 else: 

2490 toks = line.split() 

2491 t = int(toks[0]) + t0 

2492 nleap = int(toks[1]) - 10 

2493 data.append((t, nleap)) 

2494 

2495 except IOError: 

2496 raise LeapSecondsError('cannot read leap seconds file %s' % fn) 

2497 

2498 if texpires is None or tnow > texpires: 

2499 raise LeapSecondsOutdated('leap seconds list is outdated') 

2500 

2501 return data 

2502 

2503 

2504def read_leap_seconds2(): 

2505 from pyrocko import config 

2506 conf = config.config() 

2507 fn = conf.leapseconds_path 

2508 url = conf.leapseconds_url 

2509 # check for outdated default URL 

2510 if url == 'http://www.ietf.org/timezones/data/leap-seconds.list': 

2511 url = 'https://www.ietf.org/timezones/data/leap-seconds.list' 

2512 logger.info( 

2513 'Leap seconds default URL is now: %s\nUsing new default.' % url) 

2514 

2515 if url == 'https://www.ietf.org/timezones/data/leap-seconds.list': 

2516 url = 'https://hpiers.obspm.fr/iers/bul/bulc/ntp/leap-seconds.list' 

2517 logger.info( 

2518 'Leap seconds default URL is now: %s\nUsing new default.' % url) 

2519 

2520 for i in range(3): 

2521 try: 

2522 return parse_leap_seconds_list(fn) 

2523 

2524 except LeapSecondsOutdated: 

2525 try: 

2526 logger.info('updating leap seconds list...') 

2527 download_file(url, fn) 

2528 

2529 except Exception as e: 

2530 raise LeapSecondsError( 

2531 'cannot download leap seconds list from %s to %s (%s)' 

2532 % (url, fn, e)) 

2533 

2534 raise LeapSecondsError('Could not retrieve/read leap seconds file.') 

2535 

2536 

2537def gps_utc_offset(t_utc): 

2538 ''' 

2539 Time offset t_gps - t_utc for a given t_utc. 

2540 ''' 

2541 ls = read_leap_seconds2() 

2542 i = 0 

2543 if t_utc < ls[0][0]: 

2544 return ls[0][1] - 1 - 9 

2545 

2546 while i < len(ls) - 1: 

2547 if ls[i][0] <= t_utc and t_utc < ls[i+1][0]: 

2548 return ls[i][1] - 9 

2549 i += 1 

2550 

2551 return ls[-1][1] - 9 

2552 

2553 

2554def utc_gps_offset(t_gps): 

2555 ''' 

2556 Time offset t_utc - t_gps for a given t_gps. 

2557 ''' 

2558 ls = read_leap_seconds2() 

2559 

2560 if t_gps < ls[0][0] + ls[0][1] - 9: 

2561 return - (ls[0][1] - 1 - 9) 

2562 

2563 i = 0 

2564 while i < len(ls) - 1: 

2565 if ls[i][0] + ls[i][1] - 9 <= t_gps \ 

2566 and t_gps < ls[i+1][0] + ls[i+1][1] - 9: 

2567 return - (ls[i][1] - 9) 

2568 i += 1 

2569 

2570 return - (ls[-1][1] - 9) 

2571 
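# Hypothetical usage sketch (editor's note): convert a UTC timestamp to GPS
# time with the offset functions above (str_to_time is defined earlier in this
# module; the offset is 18 s for dates after 2016-12-31).
#
#     t_utc = str_to_time('2020-08-27 10:22:00')
#     t_gps = t_utc + gps_utc_offset(t_utc)   # GPS is ahead of UTC by 18 s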

2572 

2573def make_iload_family(iload_fh, doc_fmt='FMT', doc_yielded_objects='FMT'): 

2574 import itertools 

2575 import glob 

2576 from pyrocko.io.io_common import FileLoadError 

2577 

2578 def iload_filename(filename, **kwargs): 

2579 try: 

2580 with open(filename, 'rb') as f: 

2581 for cr in iload_fh(f, **kwargs): 

2582 yield cr 

2583 

2584 except FileLoadError as e: 

2585 e.set_context('filename', filename) 

2586 raise 

2587 

2588 def iload_dirname(dirname, **kwargs): 

2589 for entry in os.listdir(dirname): 

2590 fpath = op.join(dirname, entry) 

2591 if op.isfile(fpath): 

2592 for cr in iload_filename(fpath, **kwargs): 

2593 yield cr 

2594 

2595 def iload_glob(pattern, **kwargs): 

2596 

2597 for fn in glob.iglob(pattern): 

2598 for cr in iload_filename(fn, **kwargs): 

2599 yield cr 

2600 

2601 def iload(source, **kwargs): 

2602 if isinstance(source, str): 

2603 if op.isdir(source): 

2604 return iload_dirname(source, **kwargs) 

2605 elif op.isfile(source): 

2606 return iload_filename(source, **kwargs) 

2607 else: 

2608 return iload_glob(source, **kwargs) 

2609 

2610 elif hasattr(source, 'read'): 

2611 return iload_fh(source, **kwargs) 

2612 else: 

2613 return itertools.chain.from_iterable( 

2614 iload(subsource, **kwargs) for subsource in source) 

2615 

2616 iload_filename.__doc__ = ''' 

2617 Read %s information from named file. 

2618 ''' % doc_fmt 

2619 

2620 iload_dirname.__doc__ = ''' 

2621 Read %s information from directory of %s files. 

2622 ''' % (doc_fmt, doc_fmt) 

2623 

2624 iload_glob.__doc__ = ''' 

2625 Read %s information from files matching a glob pattern. 

2626 ''' % doc_fmt 

2627 

2628 iload.__doc__ = ''' 

2629 Load %s information from given source(s). 

2630 

2631 The ``source`` can be specified as the name of a %s file, the name of a 

2632 directory containing %s files, a glob pattern of %s files, an open 

2633 file handle or an iterator yielding any of the aforementioned sources. 

2634 

2635 This function behaves as a generator yielding %s objects. 

2636 ''' % (doc_fmt, doc_fmt, doc_fmt, doc_fmt, doc_yielded_objects) 

2637 

2638 for f in iload_filename, iload_dirname, iload_glob, iload: 

2639 f.__module__ = iload_fh.__module__ 

2640 

2641 return iload_filename, iload_dirname, iload_glob, iload 

2642 
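# Hypothetical usage sketch (editor's note): a format reader module would
# define a generator reading from an open file handle and derive the whole
# loader family from it (names below are made up for illustration).
#
#     def iload_fh_example(f, **kwargs):
#         yield f.read()  # yield one object per record of the format
#
#     iload_filename, iload_dirname, iload_glob, iload = make_iload_family(
#         iload_fh_example, doc_fmt='EXAMPLE', doc_yielded_objects='bytes')
#
#     # iload() then accepts a filename, directory, glob pattern, open file
#     # handle or an iterable of any of these.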

2643 

2644class Inconsistency(Exception): 

2645 pass 

2646 

2647 

2648def consistency_check(list_of_tuples, message='values differ:'): 

2649 ''' 

2650 Check for inconsistencies. 

2651 

2652 Given a list of tuples, check that all tuple elements except for the first one 

2653 match. E.g. ``[('STA.N', 55.3, 103.2), ('STA.E', 55.3, 103.2)]`` would be 

2654 valid because the coordinates at the two channels are the same. 

2655 ''' 

2656 

2657 if len(list_of_tuples) >= 2: 

2658 if any(t[1:] != list_of_tuples[0][1:] for t in list_of_tuples[1:]): 

2659 raise Inconsistency('%s\n' % message + '\n'.join( 

2660 ' %s: %s' % (t[0], ', '.join(str(x) for x in t[1:])) 

2661 for t in list_of_tuples)) 

2662 

2663 

2664class defaultzerodict(dict): 

2665 def __missing__(self, k): 

2666 return 0 

2667 

2668 

2669def mostfrequent(x): 

2670 c = defaultzerodict() 

2671 for e in x: 

2672 c[e] += 1 

2673 

2674 return sorted(list(c.keys()), key=lambda k: c[k])[-1] 

2675 

2676 

2677def consistency_merge(list_of_tuples, 

2678 message='values differ:', 

2679 error='raise', 

2680 merge=mostfrequent): 

2681 

2682 assert error in ('raise', 'warn', 'ignore') 

2683 

2684 if len(list_of_tuples) == 0: 

2685 raise Exception('cannot merge empty sequence') 

2686 

2687 try: 

2688 consistency_check(list_of_tuples, message) 

2689 return list_of_tuples[0][1:] 

2690 except Inconsistency as e: 

2691 if error == 'raise': 

2692 raise 

2693 

2694 elif error == 'warn': 

2695 logger.warning(str(e)) 

2696 

2697 return tuple([merge(x) for x in list(zip(*list_of_tuples))[1:]]) 

2698 
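# Hypothetical usage sketch (editor's note): merge slightly inconsistent
# per-channel metadata, warning instead of raising and keeping the most
# frequent value in each column.
#
#     consistency_merge(
#         [('STA.N', 55.3, 103.2),
#          ('STA.E', 55.3, 103.2),
#          ('STA.Z', 55.3, 103.3)],
#         error='warn')
#     # -> (55.3, 103.2)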

2699 

2700def short_to_list(nmax, it): 

2701 import itertools 

2702 

2703 if isinstance(it, list): 

2704 return it 

2705 

2706 li = [] 

2707 for i in range(nmax+1): 

2708 try: 

2709 li.append(next(it)) 

2710 except StopIteration: 

2711 return li 

2712 

2713 return itertools.chain(li, it) 

2714 

2715 

2716def parse_md(f): 

2717 try: 

2718 with open(op.join( 

2719 op.dirname(op.abspath(f)), 

2720 'README.md'), 'r') as readme: 

2721 mdstr = readme.read() 

2722 except IOError as e: 

2723 return 'Failed to get README.md: %s' % e 

2724 

2725 # Remove the title 

2726 mdstr = re.sub(r'^# .*\n?', '', mdstr) 

2727 # Append sphinx reference to `pyrocko.` modules 

2728 mdstr = re.sub(r'`pyrocko\.(.*)`', r':py:mod:`pyrocko.\1`', mdstr) 

2729 # Convert Subsections to toc-less rubrics 

2730 mdstr = re.sub(r'## (.*)\n', r'.. rubric:: \1\n', mdstr) 

2731 return mdstr 

2732 

2733 

2734def mpl_show(plt): 

2735 import matplotlib 

2736 if matplotlib.get_backend().lower() == 'agg': 

2737 logger.warning('Cannot show() when using matplotlib "agg" backend') 

2738 else: 

2739 plt.show() 

2740 

2741 

2742g_re_qsplit = re.compile( 

2743 r'"([^"\\]*(?:\\.[^"\\]*)*)"|\'([^\'\\]*(?:\\.[^\'\\]*)*)\'|(\S+)') 

2744g_re_qsplit_sep = {} 

2745 

2746 

2747def get_re_qsplit(sep): 

2748 if sep is None: 

2749 return g_re_qsplit 

2750 else: 

2751 if sep not in g_re_qsplit_sep: 

2752 assert len(sep) == 1 

2753 assert sep not in '\'"' 

2754 esep = re.escape(sep) 

2755 g_re_qsplit_sep[sep] = re.compile( 

2756 r'"([^"\\]*(?:\\.[^"\\]*)*)"|\'([^\'\\]*(?:\\.[^\'\\]*)*)\'|' 

2757 + r'([^' + esep + r']+|(?<=' + esep + r')(?=' + esep + r')|^(?=' + esep + r')|(?<=' + esep + r')$)') # noqa 

2758 return g_re_qsplit_sep[sep] 

2759 

2760 

2761g_re_trivial = re.compile(r'\A[^\'"\s]+\Z') 

2762g_re_trivial_sep = {} 

2763 

2764 

2765def get_re_trivial(sep): 

2766 if sep is None: 

2767 return g_re_trivial 

2768 else: 

2769 if sep not in g_re_trivial_sep: 

2770 assert len(sep) == 1 

2771 assert sep not in '\'"' 

2772 esep = re.escape(sep) 

2773 g_re_trivial_sep[sep] = re.compile(r'\A[^\'"' + esep + r']+\Z') 

2774 

2775 return g_re_trivial_sep[sep] 

2776 

2777 

2778g_re_escape_s = re.compile(r'([\\\'])') 

2779g_re_unescape_s = re.compile(r'\\([\\\'])') 

2780g_re_escape_d = re.compile(r'([\\"])') 

2781g_re_unescape_d = re.compile(r'\\([\\"])') 

2782 

2783 

2784def escape_s(s): 

2785 ''' 

2786 Backslash-escape single-quotes and backslashes. 

2787 

2788 Example: ``Jack's`` => ``Jack\\'s`` 

2789 

2790 ''' 

2791 return g_re_escape_s.sub(r'\\\1', s) 

2792 

2793 

2794def unescape_s(s): 

2795 ''' 

2796 Unescape backslash-escaped single-quotes and backslashes. 

2797 

2798 Example: ``Jack\\'s`` => ``Jack's`` 

2799 ''' 

2800 return g_re_unescape_s.sub(r'\1', s) 

2801 

2802 

2803def escape_d(s): 

2804 ''' 

2805 Backslash-escape double-quotes and backslashes. 

2806 

2807 Example: ``"Hello \\O/"`` => ``\\"Hello \\\\O/\\"`` 

2808 ''' 

2809 return g_re_escape_d.sub(r'\\\1', s) 

2810 

2811 

2812def unescape_d(s): 

2813 ''' 

2814 Unescape backslash-escaped double-quotes and backslashes. 

2815 

2816 Example: ``\\"Hello \\\\O/\\"`` => ``"Hello \\O/"`` 

2817 ''' 

2818 return g_re_unescape_d.sub(r'\1', s) 

2819 

2820 

2821def qjoin_s(it, sep=None): 

2822 ''' 

2823 Join sequence of strings into a line, single-quoting non-trivial strings. 

2824 

2825 Example: ``["55", "Sparrow's Island"]`` => ``"55 'Sparrow\\\\'s Island'"`` 

2826 ''' 

2827 re_trivial = get_re_trivial(sep) 

2828 

2829 if sep is None: 

2830 sep = ' ' 

2831 

2832 return sep.join( 

2833 w if re_trivial.search(w) else "'%s'" % escape_s(w) for w in it) 

2834 

2835 

2836def qjoin_d(it, sep=None): 

2837 ''' 

2838 Join sequence of strings into a line, double-quoting non-trivial strings. 

2839 

2840 Example: ``['55', 'Pete "The Robot" Smith']`` => 

2841 ``'55 "Pete \\\\"The Robot\\\\" Smith"'`` 

2842 ''' 

2843 re_trivial = get_re_trivial(sep) 

2844 if sep is None: 

2845 sep = ' ' 

2846 

2847 return sep.join( 

2848 w if re_trivial.search(w) else '"%s"' % escape_d(w) for w in it) 

2849 

2850 

2851def qsplit(s, sep=None): 

2852 ''' 

2853 Split line into list of strings, allowing for quoted strings. 

2854 

2855 Example: ``"55 'Sparrow\\\\'s Island'"`` => 

2856 ``["55", "Sparrow's Island"]``, 

2857 ``'55 "Pete \\\\"The Robot\\\\" Smith"'`` => 

2858 ``['55', 'Pete "The Robot" Smith']`` 

2859 ''' 

2860 re_qsplit = get_re_qsplit(sep) 

2861 return [ 

2862 (unescape_d(x[0]) or unescape_s(x[1]) or x[2]) 

2863 for x in re_qsplit.findall(s)] 

2864 
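# Hypothetical round-trip sketch (editor's note): qjoin_s and qsplit also
# accept a custom single-character separator.
#
#     row = ['GR', 'HAM3', '', "Sparrow's Island"]
#     qsplit(qjoin_s(row, sep=','), sep=',') == row
#     # -> True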

2865 

2866g_have_warned_threadpoolctl = False 

2867 

2868 

2869class threadpool_limits_dummy(object): 

2870 

2871 def __init__(self, *args, **kwargs): 

2872 pass 

2873 

2874 def __enter__(self): 

2875 global g_have_warned_threadpoolctl 

2876 

2877 if not g_have_warned_threadpoolctl: 

2878 logger.warning( 

2879 'Cannot control number of BLAS threads because ' 

2880 '`threadpoolctl` module is not available. You may want to ' 

2881 'install `threadpoolctl`.') 

2882 

2883 g_have_warned_threadpoolctl = True 

2884 

2885 return self 

2886 

2887 def __exit__(self, type, value, traceback): 

2888 pass 

2889 

2890 

2891def get_threadpool_limits(): 

2892 ''' 

2893 Try to import threadpoolctl.threadpool_limits, provide a dummy if not available. 

2894 ''' 

2895 

2896 try: 

2897 from threadpoolctl import threadpool_limits 

2898 return threadpool_limits 

2899 

2900 except ImportError: 

2901 return threadpool_limits_dummy 

2902 
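# Hypothetical usage sketch (editor's note): limit the number of BLAS threads
# around a heavy numerical call; falls back to the warning-only dummy when
# threadpoolctl is not installed.
#
#     threadpool_limits = get_threadpool_limits()
#     with threadpool_limits(limits=1, user_api='blas'):
#         pass  # numerical code goes here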

2903 

2904def fmt_summary(entries, widths): 

2905 return ' | '.join( 

2906 entry.ljust(width) for (entry, width) in zip(entries, widths)) 

2907 

2908 

2909def smart_weakref(obj, callback=None): 

2910 if inspect.ismethod(obj): 

2911 return weakref.WeakMethod(obj, callback) 

2912 else: 

2913 return weakref.ref(obj, callback)
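# Hypothetical usage sketch (editor's note): smart_weakref returns a
# weakref.WeakMethod for bound methods and a plain weakref.ref otherwise;
# both are dereferenced by calling the returned object.
#
#     class Listener(object):
#         def notify(self):
#             pass
#
#     listener = Listener()
#     ref = smart_weakref(listener.notify)
#     ref()   # -> the bound method while `listener` is alive, else None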