1# http://pyrocko.org - GPLv3 

2 

3# 

4# The Pyrocko Developers, 21st Century 

5# ---|P------/S----------~Lg---------- 

6''' 

7Utility functions for Pyrocko. 

8 

9.. _time-handling-mode: 

10 

11High precision time handling mode 

12................................. 

13 

14Pyrocko can treat timestamps either as standard double precision (64 bit) 

15floating point values, or as high precision float (:py:class:`numpy.float128` 

16or :py:class:`numpy.float96`, whichever is available), aliased here as 

17:py:class:`pyrocko.util.hpfloat`. High precision time stamps are required when 

18handling data with sub-millisecond precision, i.e. kHz/MHz data streams and 

19event catalogs derived from such data. 

20 

21Not all functions in Pyrocko and in programs depending on Pyrocko may work 

22correctly with high precision times. Therefore, Pyrocko's high precision time 

23handling mode has to be actively activated by user config, command line option 

24or enforced within a certain script/program. 

25 

26The default high precision time handling mode can be configured globally with 

27the user config variable 

28:py:gattr:`~pyrocko.config.Config.use_high_precision_time`. Calling the 

29function :py:func:`use_high_precision_time` overrides the default from the 

30config file. This function may be called at startup of a program/script 

31requiring a specific time handling mode. 

32 

33To create a valid time stamp for use in Pyrocko (e.g. in 

34:py:class:`~pyrocko.model.Event` or :py:class:`~pyrocko.trace.Trace` objects), 

35use: 

36 

37.. code-block :: python 

38 

39 import time 

40 from pyrocko import util 

41 

42 # By default using mode selected in user config, override with: 

43 # util.use_high_precision_time(True) # force high precision mode 

44 # util.use_high_precision_time(False) # force low precision mode 

45 

46 t1 = util.str_to_time('2020-08-27 10:22:00') 

47 t2 = util.str_to_time('2020-08-27 10:22:00.111222') 

48 t3 = util.to_time_float(time.time()) 

49 

50 # To get the appropriate float class, use: 

51 

52 time_float = util.get_time_float() 

53 # -> float, numpy.float128 or numpy.float96 

54 [isinstance(t, time_float) for t in [t1, t2, t3]] 

55 # -> [True, True, True] 

56 

57 # Shortcut: 

58 util.check_time_class(t1) 

59 

60Module content 

61.............. 

62''' 

63 

64from __future__ import division, print_function 

65 

66import time 

67import logging 

68import os 

69import sys 

70import re 

71import calendar 

72import math 

73import fnmatch 

74try: 

75 import fcntl 

76except ImportError: 

77 fcntl = None 

78import optparse 

79import os.path as op 

80import errno 

81 

82import numpy as num 

83from scipy import signal 

84import pyrocko 

85from pyrocko import dummy_progressbar 

86 

87 

88try: 

89 from urllib.parse import urlencode, quote, unquote # noqa 

90 from urllib.request import ( 

91 Request, build_opener, HTTPDigestAuthHandler, urlopen as _urlopen) # noqa 

92 from urllib.error import HTTPError, URLError # noqa 

93 

94except ImportError: 

95 from urllib import urlencode, quote, unquote # noqa 

96 from urllib2 import (Request, build_opener, HTTPDigestAuthHandler, # noqa 

97 HTTPError, URLError, urlopen as _urlopen) # noqa 

98 

99try: 

100 import certifi 

101 import ssl 

102 g_ssl_context = ssl.create_default_context(cafile=certifi.where()) 

103except ImportError: 

104 g_ssl_context = None 

105 

106 

class URLErrorSSL(URLError):
    '''
    URL error carrying a hint that the failure may be SSL-related.

    Wraps an existing :py:exc:`urllib.error.URLError`, adopting its state.
    '''

    def __init__(self, urlerror):
        # Take over all attributes of the wrapped error.
        self.__dict__ = urlerror.__dict__.copy()

    def __str__(self):
        return (
            'Requesting web resource failed and the problem could be '
            'related to SSL. Python standard libraries on some older '
            'systems (like Ubuntu 14.04) are known to have trouble '
            'with some SSL setups of today\'s servers: %s'
            % URLError.__str__(self))

119 

120 

def urlopen_ssl_check(*args, **kwargs):
    '''
    Like :py:func:`urlopen` but re-raise SSL-related failures as
    :py:exc:`URLErrorSSL` with a more helpful message.
    '''
    try:
        return urlopen(*args, **kwargs)
    except URLError as e:
        if 'SSL' in str(e):
            raise URLErrorSSL(e)

        raise

129 

130 

def urlopen(*args, **kwargs):
    '''
    Open a URL like :py:func:`urllib.request.urlopen`, injecting the
    certifi-backed SSL context (if available) unless the caller supplies
    an explicit ``context``.
    '''
    if g_ssl_context is not None:
        kwargs.setdefault('context', g_ssl_context)

    return _urlopen(*args, **kwargs)

137 

138 

try:
    long
except NameError:
    # Python 3: `long` was merged into `int`; alias it so Python 2/3
    # compatible code below keeps working.
    long = int

143 

144 

# When True, progressbar() always uses the dummy (non-interactive)
# implementation, even if the real `progressbar` module is importable.
force_dummy_progressbar = False

146 

147 

148try: 

149 from pyrocko import util_ext 

150except ImportError: 

151 util_ext = None 

152 

153 

154logger = logging.getLogger('pyrocko.util') 

155 

156try: 

157 import progressbar as progressbar_mod 

158except ImportError: 

159 from pyrocko import dummy_progressbar as progressbar_mod 

160 

161 

try:
    num_full = num.full
except AttributeError:
    # Fallback for old NumPy versions lacking numpy.full.
    def num_full(shape, fill_value, dtype=None, order='C'):
        a = num.empty(shape, dtype=dtype, order=order)
        a.fill(fill_value)
        return a

try:
    num_full_like = num.full_like
except AttributeError:
    # Fallback for old NumPy versions lacking numpy.full_like.
    def num_full_like(arr, fill_value, dtype=None, order='K', subok=True):
        a = num.empty_like(arr, dtype=dtype, order=order, subok=subok)
        a.fill(fill_value)
        return a

177 

178 

def progressbar_module():
    '''
    Return the progress bar module in use (real ``progressbar`` or the
    dummy fallback, depending on what could be imported).
    '''
    return progressbar_mod

181 

182 

# Remembers the (programname, levelname) of the last setup_logging() call,
# so worker subprocesses can replicate the parent's logging configuration
# (see subprocess_setup_logging_args()).
g_setup_logging_args = 'pyrocko', 'warning'

184 

185 

def setup_logging(programname='pyrocko', levelname='warning'):
    '''
    Initialize logging.

    :param programname: program name to be written in log
    :param levelname: string indicating the logging level ('debug', 'info',
        'warning', 'error', 'critical')

    This function is called at startup by most pyrocko programs to set up a
    consistent logging format. This is simply a shortcut to a call to
    :py:func:`logging.basicConfig()`.
    '''

    # Remember the arguments so subprocesses can replicate the setup.
    global g_setup_logging_args
    g_setup_logging_args = (programname, levelname)

    level = {
        'debug': logging.DEBUG,
        'info': logging.INFO,
        'warning': logging.WARNING,
        'error': logging.ERROR,
        'critical': logging.CRITICAL}[levelname]

    logging.basicConfig(
        level=level,
        format=programname+':%(name)-25s - %(levelname)-8s - %(message)s')

211 

212 

def subprocess_setup_logging_args():
    '''
    Get arguments from previous call to setup_logging.

    These can be sent down to a worker process so it can setup its logging
    in the same way as the main process.

    :returns: tuple ``(programname, levelname)``
    '''
    return g_setup_logging_args

221 

222 

def data_file(fn):
    '''
    Return full path to file *fn* in Pyrocko's bundled ``data`` directory.
    '''
    return os.path.join(os.path.dirname(__file__), 'data', fn)

225 

226 

class DownloadError(Exception):
    '''
    Raised when downloading of a remote resource fails.
    '''
    pass

229 

230 

class PathExists(DownloadError):
    '''
    Raised when the download target path already exists.
    '''
    pass

233 

234 

def _download(url, fpath, username=None, password=None,
              force=False, method='download', stats=None,
              status_callback=None, entries_wanted=None,
              recursive=False, header=None):

    '''
    Download a file or a directory tree via HTTP(S) (internal).

    :param url: source URL; must end with ``'/'`` for recursive downloads
    :param fpath: local target path (file, or directory root when
        ``recursive``)
    :param username: optional HTTP basic auth user name
    :param password: optional HTTP basic auth password
    :param force: when downloading recursively, allow ``fpath`` to exist
    :param method: ``'download'`` to fetch, ``'calcsize'`` to only return
        the total download size in bytes
    :param stats: unused, kept for backward compatibility
    :param status_callback: called with a progress status dict repeatedly
    :param entries_wanted: optional list of wanted directory entries;
        other entries are skipped in recursive mode
    :param recursive: download a whole directory tree
    :param header: optional dict of extra HTTP headers

    :raises DownloadError: on HTTP errors or bad URL/option combinations
    :raises PathExists: if target exists and ``force`` is not set
    '''

    import requests
    from requests.auth import HTTPBasicAuth
    from requests.exceptions import HTTPError as req_HTTPError

    requests.adapters.DEFAULT_RETRIES = 5
    urljoin = requests.compat.urljoin

    session = requests.Session()
    if header:
        # Merge caller-supplied HTTP headers into the session defaults.
        # (Previously this assigned to a non-existent `session.header`
        # attribute, which had no effect on outgoing requests.)
        session.headers.update(header)

    session.auth = None if username is None\
        else HTTPBasicAuth(username, password)

    status = {
        'ntotal_files': 0,
        'nread_files': 0,
        'ntotal_bytes_all_files': 0,
        'nread_bytes_all_files': 0,
        'ntotal_bytes_current_file': 0,
        'nread_bytes_current_file': 0,
        'finished': False
    }

    try:
        url_to_size = {}

        if callable(status_callback):
            status_callback(status)

        if not recursive and url.endswith('/'):
            raise DownloadError(
                'URL: %s appears to be a directory'
                ' but recursive download is False' % url)

        if recursive and not url.endswith('/'):
            url += '/'

        def parse_directory_tree(url, subdir=''):
            # Scrape file and subdirectory names from the server's
            # directory listing and recurse into subdirectories.
            r = session.get(urljoin(url, subdir))
            r.raise_for_status()

            entries = re.findall(r'href="([a-zA-Z0-9_.-]+/?)"', r.text)

            files = sorted(set(subdir + fn for fn in entries
                               if not fn.endswith('/')))

            if entries_wanted is not None:
                # Keep only entries matching any of the wanted names.
                # (Previously the condition was a bare generator
                # expression, which is always truthy, making the filter a
                # no-op.)
                files = [fn for fn in files
                         if any(fn in wanted for wanted in entries_wanted)]

            status['ntotal_files'] += len(files)

            dirs = sorted(set(subdir + dn for dn in entries
                              if dn.endswith('/')
                              and dn not in ('./', '../')))

            for dn in dirs:
                files.extend(parse_directory_tree(
                    url, subdir=dn))

            return files

        def get_content_length(url):
            # Cached HEAD request; returns int size or None if the server
            # does not report Content-Length.
            if url not in url_to_size:
                r = session.head(url, headers={'Accept-Encoding': ''})

                content_length = r.headers.get('content-length', None)
                if content_length is None:
                    logger.debug('Could not get HTTP header '
                                 'Content-Length for %s' % url)

                    content_length = None

                else:
                    content_length = int(content_length)
                    status['ntotal_bytes_all_files'] += content_length
                    if callable(status_callback):
                        status_callback(status)

                url_to_size[url] = content_length

            return url_to_size[url]

        def download_file(url, fn):
            logger.info('starting download of %s...' % url)
            ensuredirs(fn)

            fsize = get_content_length(url)
            status['ntotal_bytes_current_file'] = fsize
            status['nread_bytes_current_file'] = 0
            if callable(status_callback):
                status_callback(status)

            r = session.get(url, stream=True, timeout=5)
            r.raise_for_status()

            frx = 0
            # Download to a temporary file first, rename only on success,
            # so partial downloads never shadow complete files.
            fn_tmp = fn + '.%i.temp' % os.getpid()
            with open(fn_tmp, 'wb') as f:
                for d in r.iter_content(chunk_size=1024):
                    f.write(d)
                    frx += len(d)

                    status['nread_bytes_all_files'] += len(d)
                    status['nread_bytes_current_file'] += len(d)
                    if callable(status_callback):
                        status_callback(status)

            os.rename(fn_tmp, fn)

            if fsize is not None and frx != fsize:
                logger.warning(
                    'HTTP header Content-Length: %i bytes does not match '
                    'download size %i bytes' % (fsize, frx))

            logger.info('finished download of %s' % url)

            status['nread_files'] += 1
            if callable(status_callback):
                status_callback(status)

        if recursive:
            if op.exists(fpath) and not force:
                raise PathExists('path %s already exists' % fpath)

            files = parse_directory_tree(url)

            dsize = 0
            for fn in files:
                file_url = urljoin(url, fn)
                dsize += get_content_length(file_url) or 0

            if method == 'calcsize':
                return dsize

            else:
                for fn in files:
                    file_url = urljoin(url, fn)
                    download_file(file_url, op.join(fpath, fn))

        else:
            status['ntotal_files'] += 1
            if callable(status_callback):
                status_callback(status)

            fsize = get_content_length(url)
            if method == 'calcsize':
                return fsize
            else:
                download_file(url, fpath)

    except req_HTTPError as e:
        # Use the module logger for consistency with the rest of the file.
        logger.warning("http error: %s" % e)
        raise DownloadError('could not download file(s) from: %s' % url)

    finally:
        status['finished'] = True
        if callable(status_callback):
            status_callback(status)
        session.close()

399 

400 

def download_file(
        url, fpath, username=None, password=None, status_callback=None,
        **kwargs):
    '''
    Download a single file from *url* to local path *fpath*.

    Thin wrapper around :py:func:`_download` with ``recursive=False``.
    '''
    return _download(
        url, fpath, username, password,
        recursive=False,
        status_callback=status_callback,
        **kwargs)

409 

410 

def download_dir(
        url, fpath, username=None, password=None, status_callback=None,
        **kwargs):
    '''
    Download a directory tree from *url* below local path *fpath*.

    Thin wrapper around :py:func:`_download` with ``recursive=True``.
    '''
    return _download(
        url, fpath, username, password,
        recursive=True,
        status_callback=status_callback,
        **kwargs)

420 

421 

class HPFloatUnavailable(Exception):
    '''
    Raised when a high precision float type is requested but NumPy does not
    provide one on this platform.
    '''
    pass

424 

425 

class dummy_hpfloat(object):
    '''
    Placeholder for :py:class:`hpfloat`; raises on any instantiation
    attempt when no high precision float type is available.
    '''

    def __init__(self, *args, **kwargs):
        raise HPFloatUnavailable(
            'NumPy lacks support for float128 or float96 data type on this '
            'platform.')

431 

432 

# Alias for the highest precision float type NumPy offers on this platform
# (float128 on most 64-bit systems, float96 on some 32-bit systems). Falls
# back to a placeholder class which raises HPFloatUnavailable on use.
if hasattr(num, 'float128'):
    hpfloat = num.float128

elif hasattr(num, 'float96'):
    hpfloat = num.float96

else:
    hpfloat = dummy_hpfloat

441 

442 

# Effective time handling mode, fixed lazily on first use (see
# _setup_high_precision_time_mode): float class for scalar timestamps and
# NumPy dtype for timestamp arrays. None means "not yet decided".
g_time_float = None
g_time_dtype = None

445 

446 

class TimeFloatSettingError(Exception):
    '''
    Raised when the time handling mode cannot be (re)configured.
    '''
    pass

449 

450 

def use_high_precision_time(enabled):
    '''
    Globally force a specific time handling mode.

    See :ref:`High precision time handling mode <time-handling-mode>`.

    :param enabled: enable/disable use of high precision time type
    :type enabled: bool

    This function should be called before handling/reading any time data.
    It can only be called once.

    Special attention is required when using multiprocessing on a platform
    which does not use fork under the hood. In such cases, the desired
    setting must be set also in the subprocess.
    '''
    # Delegate; the application override has highest priority.
    _setup_high_precision_time_mode(enabled_app=enabled)

468 

469 

def _setup_high_precision_time_mode(enabled_app=False):
    '''
    Fix the global time handling mode (internal).

    Resolution order (later overrides earlier): user config variable
    ``use_high_precision_time``, environment variable
    ``PYROCKO_USE_HIGH_PRECISION_TIME``, application override passed in
    via :py:func:`use_high_precision_time`.

    :param enabled_app: application override; hedge: callers pass ``None``
        semantics via the ``is not None`` check below — TODO confirm the
        intended default interplay with the config value
    :raises TimeFloatSettingError: if the mode was already fixed by an
        earlier call, or the environment variable has an invalid value
    '''
    global g_time_float
    global g_time_dtype

    # The mode may only be decided once per process; later changes would
    # silently mix timestamp types.
    if not (g_time_float is None and g_time_dtype is None):
        raise TimeFloatSettingError(
            'Cannot set time handling mode: too late, it has already been '
            'fixed by an earlier call.')

    from pyrocko import config

    conf = config.config()
    enabled_config = conf.use_high_precision_time

    enabled_env = os.environ.get('PYROCKO_USE_HIGH_PRECISION_TIME', None)
    if enabled_env is not None:
        try:
            enabled_env = int(enabled_env) == 1
        except ValueError:
            raise TimeFloatSettingError(
                'Environment variable PYROCKO_USE_HIGH_PRECISION_TIME '
                'should be set to 0 or 1.')

    # Start from the config default, then apply overrides in priority order.
    enabled = enabled_config
    mode_from = 'config variable `use_high_precision_time`'
    notify = enabled

    if enabled_env is not None:
        if enabled_env != enabled:
            notify = True
            enabled = enabled_env
            mode_from = \
                'environment variable `PYROCKO_USE_HIGH_PRECISION_TIME`'

    if enabled_app is not None:
        if enabled_app != enabled:
            notify = True
            enabled = enabled_app
            mode_from = 'application override'

    logger.debug('''
Pyrocko high precision time mode selection (latter override earlier):
    config: %s
    env: %s
    app: %s
    -> enabled: %s'''.lstrip() % (
        enabled_config, enabled_env, enabled_app, enabled))

    if notify:
        logger.info('Pyrocko high precision time mode %s by %s.' % (
            'activated' if enabled else 'deactivated',
            mode_from))

    if enabled:
        g_time_float = hpfloat
        g_time_dtype = hpfloat
    else:
        g_time_float = float
        g_time_dtype = num.float64

528 

529 

def get_time_float():
    '''
    Get the effective float class for timestamps.

    See :ref:`High precision time handling mode <time-handling-mode>`.

    :returns: :py:class:`float` or :py:class:`hpfloat`, depending on the
        current time handling mode
    '''
    global g_time_float

    # Lazily fix the mode on first use.
    if g_time_float is None:
        _setup_high_precision_time_mode()

    return g_time_float

545 

546 

def get_time_dtype():
    '''
    Get effective NumPy float class to handle timestamps.

    See :ref:`High precision time handling mode <time-handling-mode>`.
    '''
    global g_time_dtype

    # Lazily fix the mode on first use.
    if g_time_dtype is None:
        _setup_high_precision_time_mode()

    return g_time_dtype

560 

561 

def to_time_float(t):
    '''
    Convert float to valid time stamp in the current time handling mode.

    See :ref:`High precision time handling mode <time-handling-mode>`.
    '''
    # Cast through whichever float class is currently in effect.
    return get_time_float()(t)

569 

570 

class TimestampTypeError(ValueError):
    '''
    Raised by :py:func:`check_time_class` for type-mismatched timestamps.
    '''
    pass

573 

574 

def check_time_class(t, error='raise'):
    '''
    Type-check variable against current time handling mode.

    See :ref:`High precision time handling mode <time-handling-mode>`.

    :param t: timestamp to check
    :param error: ``'raise'`` to raise :py:exc:`TimestampTypeError`,
        ``'warn'`` to only log a warning
    '''

    # Zero is accepted regardless of its type.
    if t == 0.0:
        return

    if isinstance(t, get_time_float()):
        return

    message = (
        'Timestamp %g is of type %s but should be of type %s with '
        'Pyrocko\'s currently selected time handling mode.\n\n'
        'See https://pyrocko.org/docs/current/library/reference/util.html'
        '#high-precision-time-handling-mode' % (
            t, type(t), get_time_float()))

    if error == 'raise':
        raise TimestampTypeError(message)
    elif error == 'warn':
        logger.warning(message)
    else:
        assert False

599 

600 

class Stopwatch(object):
    '''
    Simple stopwatch to measure elapsed wall clock time.

    Usage::

        s = Stopwatch()
        time.sleep(1)
        print(s())
        time.sleep(1)
        print(s())
    '''

    def __init__(self):
        # Wall clock time at creation.
        self.start = time.time()

    def __call__(self):
        # Seconds elapsed since creation.
        return time.time() - self.start

619 

620 

def wrap(text, line_length=80):
    '''
    Paragraph and list-aware wrapping of text.

    :param text: input text
    :param line_length: maximum length of output lines

    :returns: list of output lines (without trailing newlines)
    '''

    text = text.strip('\n')
    at_lineend = re.compile(r' *\n')
    at_para = re.compile(r'((^|(\n\s*)?\n)(\s+[*] )|\n\s*\n)')

    # at_para has 4 capture groups, so re.split yields 5 entries per
    # separator: [::5] are the paragraph texts, [3::5] the matched
    # separators and [4::5] the captured list-item markers ('  * ') or
    # None for plain paragraphs.
    paragraphs = at_para.split(text)[::5]
    listindents = at_para.split(text)[4::5]
    newlist = at_para.split(text)[3::5]

    # Pad the helper lists so they can be indexed alongside `paragraphs`.
    listindents[0:0] = [None]
    listindents.append(True)
    newlist.append(None)

    det_indent = re.compile(r'^ *')

    outlines = []
    for ip, p in enumerate(paragraphs):
        if not p:
            continue

        if listindents[ip] is None:
            # Plain paragraph: keep its own leading indentation on all
            # lines.
            _indent = det_indent.findall(p)[0]
            findent = _indent
        else:
            # List item: first line carries the item marker, continuation
            # lines are indented by the marker's width.
            findent = listindents[ip]
            _indent = ' ' * len(findent)

        ll = line_length - len(_indent)
        llf = ll

        # Re-join the paragraph into one string, then chop it at word
        # boundaries into lines of at most `ll` characters.
        oldlines = [s.strip() for s in at_lineend.split(p.rstrip())]
        p1 = ' '.join(oldlines)
        possible = re.compile(r'(^.{1,%i}|.{1,%i})( |$)' % (llf, ll))
        for imatch, match in enumerate(possible.finditer(p1)):
            parout = match.group(1)
            if imatch == 0:
                outlines.append(findent + parout)
            else:
                outlines.append(_indent + parout)

        # Insert a blank line between paragraphs, but not between
        # consecutive items of the same list.
        if ip != len(paragraphs)-1 and (
                listindents[ip] is None
                or newlist[ip] is not None
                or listindents[ip+1] is None):

            outlines.append('')

    return outlines

673 

674 

def ewrap(lines, width=80, indent=''):
    '''
    Lay out strings in equal-width columns.

    :param lines: iterable of strings
    :param width: maximum total width of an output row in characters
    :param indent: string prepended to every output row

    :returns: single string with rows joined by newlines; empty string for
        empty input
    '''
    lines = list(lines)
    if not lines:
        return ''

    fwidth = max(len(s) for s in lines)
    # Number of columns fitting into the requested width, at least one.
    # (Previously the width of 80 was hard-coded here, silently ignoring
    # the `width` parameter.)
    nx = max(1, (width - len(indent)) // (fwidth + 1))

    rows = []
    for i in range(0, len(lines), nx):
        rows.append(
            indent + ' '.join(x.ljust(fwidth) for x in lines[i:i+nx]))

    return '\n'.join(rows)

688 

689 

class BetterHelpFormatter(optparse.IndentedHelpFormatter):
    '''
    Help formatter for :py:mod:`optparse` using Pyrocko's paragraph-aware
    :py:func:`wrap` for option help and description texts.
    '''

    def __init__(self, *args, **kwargs):
        optparse.IndentedHelpFormatter.__init__(self, *args, **kwargs)

    def format_option(self, option):
        '''
        From IndentedHelpFormatter but using a different wrap method.
        '''

        help_text_position = 4 + self.current_indent
        help_text_width = self.width - help_text_position

        opts = self.option_strings[option]
        opts = "%*s%s" % (self.current_indent, "", opts)
        if option.help:
            help_text = self.expand_default(option)

            # Put option string and help text on one line if they fit,
            # otherwise wrap the help text onto separate indented lines.
            if self.help_position + len(help_text) + 1 <= self.width:
                lines = [
                    '',
                    '%-*s %s' % (self.help_position, opts, help_text),
                    '']
            else:
                lines = ['']
                lines.append(opts)
                lines.append('')
                if option.help:
                    help_lines = wrap(help_text, help_text_width)
                    lines.extend(["%*s%s" % (help_text_position, "", line)
                                  for line in help_lines])
                lines.append('')

        return "\n".join(lines)

    def format_description(self, description):
        if not description:
            return ''

        if self.current_indent == 0:
            lines = []
        else:
            lines = ['']

        lines.extend(wrap(description, self.width - self.current_indent))
        if self.current_indent == 0:
            lines.append('\n')

        return '\n'.join(
            ['%*s%s' % (self.current_indent, '', line) for line in lines])

740 

741 

def progressbar(label, maxval):
    '''
    Create and start a progress bar with the given *label* and maximum
    value *maxval*.

    Uses the real `progressbar` module when available, otherwise (or when
    ``force_dummy_progressbar`` is set) the dummy fallback.
    '''
    mod = progressbar_module()
    if force_dummy_progressbar:
        mod = dummy_progressbar

    widgets = [
        label, ' ',
        mod.Bar(marker='-', left='[', right=']'), ' ',
        mod.Percentage(), ' ',
        mod.ETA()]

    return mod.ProgressBar(widgets=widgets, maxval=maxval).start()

755 

756 

def progress_beg(label):
    '''
    Notify user that an operation has started.

    :param label: name of the operation

    To be used in conjunction with :py:func:`progress_end`.
    '''
    stream = sys.stderr
    stream.write(label)
    stream.flush()

768 

769 

def progress_end(label=''):
    '''
    Notify user that an operation has ended.

    :param label: name of the operation

    To be used in conjunction with :py:func:`progress_beg`.
    '''
    stream = sys.stderr
    stream.write(' done. %s\n' % label)
    stream.flush()

781 

782 

class ArangeError(Exception):
    '''
    Raised by :py:func:`arange2` for inconsistent range specifications.
    '''
    pass

785 

786 

def arange2(start, stop, step, dtype=float, epsilon=1e-6, error='raise'):
    '''
    Return evenly spaced numbers over a specified interval.

    Like :py:func:`numpy.arange` but returning floating point numbers by
    default and with defined behaviour when stepsize is inconsistent with
    interval bounds. It is considered inconsistent if the difference between
    the closest multiple of ``step`` and ``stop`` is larger than ``epsilon *
    step``. Inconsistencies are handled according to the ``error`` parameter.
    If it is set to ``'raise'`` an exception of type :py:exc:`ArangeError` is
    raised. If it is set to ``'round'``, ``'floor'``, or ``'ceil'``, ``stop``
    is silently changed to the closest, the next smaller, or next larger
    multiple of ``step``, respectively.
    '''

    assert error in ('raise', 'round', 'floor', 'ceil')

    start = dtype(start)
    stop = dtype(stop)
    step = dtype(step)

    # 'raise' and 'round' both round to nearest; only the consistency check
    # below differs.
    if error == 'floor':
        rounder = math.floor
    elif error == 'ceil':
        rounder = math.ceil
    else:
        rounder = round

    nvalues = int(rounder((stop - start) / step)) + 1
    stop_implied = start + (nvalues - 1) * step

    if error == 'raise' and abs(stop_implied - stop) > step * epsilon:
        raise ArangeError(
            'inconsistent range specification: start=%g, stop=%g, step=%g'
            % (start, stop, step))

    values = num.arange(nvalues, dtype=dtype)
    values *= step
    values += start
    return values

822 

823 

def polylinefit(x, y, n_or_xnodes):
    '''
    Fit piece-wise linear function to data.

    :param x,y: arrays with coordinates of data
    :param n_or_xnodes: int, number of segments or x coordinates of polyline

    :returns: `(xnodes, ynodes, rms_error)` arrays with coordinates of
        polyline, root-mean-square error
    '''

    x = num.asarray(x)
    y = num.asarray(y)

    if isinstance(n_or_xnodes, int):
        # Equally spaced segments over the data range.
        n = n_or_xnodes
        xmin = x.min()
        xmax = x.max()
        xnodes = num.linspace(xmin, xmax, n+1)
    else:
        xnodes = num.asarray(n_or_xnodes)
        n = xnodes.size - 1

    assert len(x) == len(y)
    assert n > 0

    ndata = len(x)
    # Design matrix: one (slope, offset) column pair per segment, plus n-1
    # heavily weighted rows enforcing continuity at interior nodes.
    a = num.zeros((ndata+(n-1), n*2))
    for i in range(n):
        xmin_block = xnodes[i]
        xmax_block = xnodes[i+1]
        if i == n-1:  # don't lose last point
            indices = num.where(
                num.logical_and(xmin_block <= x, x <= xmax_block))[0]
        else:
            indices = num.where(
                num.logical_and(xmin_block <= x, x < xmax_block))[0]

        a[indices, i*2] = x[indices]
        a[indices, i*2+1] = 1.0

        # Large weight makes the continuity constraints effectively hard.
        w = float(ndata)*100.
        if i < n-1:
            a[ndata+i, i*2] = xmax_block*w
            a[ndata+i, i*2+1] = 1.0*w
            a[ndata+i, i*2+2] = -xmax_block*w
            a[ndata+i, i*2+3] = -1.0*w

    d = num.concatenate((y, num.zeros(n-1)))
    model = num.linalg.lstsq(a, d, rcond=-1)[0].reshape((n, 2))

    # Node heights: evaluate adjacent segments at each node, averaging the
    # two values at interior nodes.
    ynodes = num.zeros(n+1)
    ynodes[:n] = model[:, 0]*xnodes[:n] + model[:, 1]
    ynodes[1:] += model[:, 0]*xnodes[1:] + model[:, 1]
    ynodes[1:n] *= 0.5

    rms_error = num.sqrt(num.mean((num.interp(x, xnodes, ynodes) - y)**2))

    return xnodes, ynodes, rms_error

883 

884 

def plf_integrate_piecewise(x_edges, x, y):
    '''
    Calculate definite integral of piece-wise linear function on intervals.

    Use trapezoidal rule to calculate definite integral of a piece-wise
    linear function for a series of consecutive intervals. ``x_edges`` and
    ``x`` must be sorted.

    :param x_edges: array with edges of the intervals
    :param x,y: arrays with coordinates of piece-wise linear function's
        control points
    '''

    # Merge control points and interval edges into one sorted point set.
    xc = num.concatenate((x, x_edges))
    order = num.argsort(xc)
    y_edges = num.interp(x_edges, x, y)
    yc = num.concatenate((y, y_edges))
    xs = xc[order]
    ys = yc[order]
    # Cumulative trapezoidal integral over the merged points, scattered
    # back so that the edge positions carry the integral up to each edge.
    yc[order[1:]] = num.cumsum((xs[1:] - xs[:-1]) * 0.5 * (ys[1:] + ys[:-1]))
    return num.diff(yc[-len(y_edges):])

906 

907 

def diff_fd_1d_4o(dt, data):
    '''
    Approximate first derivative of an array (forth order, central FD).

    :param dt: sampling interval
    :param data: NumPy array with data samples

    :returns: NumPy array with same shape as input

    Interior points are approximated to fourth order, edge points to first
    order right- or left-sided respectively, points next to edge to second
    order central.
    '''
    import scipy.signal

    npoints = data.size
    result = num.empty_like(data, dtype=float)

    if npoints >= 5:
        # Fourth-order central stencil for interior points.
        result[2:-2] = scipy.signal.lfilter(
            [-1., +8., 0., -8., 1.], [1.], data)[4:] / (12.*dt)

    if npoints >= 3:
        # Second-order central stencil next to the edges.
        result[1] = (data[2] - data[0]) / (2. * dt)
        result[-2] = (data[-1] - data[-3]) / (2. * dt)

    if npoints >= 2:
        # First-order one-sided stencils at the edges.
        result[0] = (data[1] - data[0]) / dt
        result[-1] = (data[-1] - data[-2]) / dt

    if npoints == 1:
        result[0] = 0.0

    return result

941 

942 

def diff_fd_1d_2o(dt, data):
    '''
    Approximate first derivative of an array (second order, central FD).

    :param dt: sampling interval
    :param data: NumPy array with data samples

    :returns: NumPy array with same shape as input

    Interior points are approximated to second order, edge points to first
    order right- or left-sided respectively.

    Uses :py:func:`numpy.gradient`.
    '''
    # numpy.gradient implements exactly this scheme.
    return num.gradient(data, dt)

959 

960 

def diff_fd_2d_4o(dt, data):
    '''
    Approximate second derivative of an array (forth order, central FD).

    :param dt: sampling interval
    :param data: NumPy array with data samples

    :returns: NumPy array with same shape as input

    Interior points are approximated to fourth order, next-to-edge points to
    second order, edge points repeated.
    '''
    import scipy.signal

    npoints = data.size
    result = num.empty_like(data, dtype=float)

    if npoints >= 5:
        # Fourth-order central stencil for interior points.
        result[2:-2] = scipy.signal.lfilter(
            [-1., +16., -30., +16., -1.], [1.], data)[4:] / (12.*dt**2)

    if npoints >= 3:
        # Second-order stencil for the two points at each edge.
        result[:2] = (data[2] - 2.0 * data[1] + data[0]) / dt**2
        result[-2:] = (data[-1] - 2.0 * data[-2] + data[-3]) / dt**2

    if npoints < 3:
        result[:] = 0.0

    return result

989 

990 

def diff_fd_2d_2o(dt, data):
    '''
    Approximate second derivative of an array (second order, central FD).

    :param dt: sampling interval
    :param data: NumPy array with data samples

    :returns: NumPy array with same shape as input

    Interior points are approximated to second order, edge points repeated.
    '''
    import scipy.signal

    npoints = data.size
    result = num.empty_like(data, dtype=float)

    if npoints >= 3:
        # Second-order central stencil for interior points.
        result[1:-1] = scipy.signal.lfilter(
            [1., -2., 1.], [1.], data)[2:] / (dt**2)

        # Repeat edge values.
        result[0] = result[1]
        result[-1] = result[-2]
    else:
        result[:] = 0.0

    return result

1017 

1018 

def diff_fd(n, order, dt, data):
    '''
    Approximate 1st or 2nd derivative of an array.

    :param n: 1 for first derivative, 2 for second
    :param order: order of the approximation 2 and 4 are supported
    :param dt: sampling interval
    :param data: NumPy array with data samples

    :returns: NumPy array with same shape as input

    This is a frontend to the functions :py:func:`diff_fd_1d_2o`,
    :py:func:`diff_fd_1d_4o`, :py:func:`diff_fd_2d_2o`, and
    :py:func:`diff_fd_2d_4o`.

    Raises :py:exc:`ValueError` for unsupported `n` or `order`.
    '''

    dispatch = {
        1: {2: diff_fd_1d_2o, 4: diff_fd_1d_4o},
        2: {2: diff_fd_2d_2o, 4: diff_fd_2d_4o}}

    if n not in dispatch:
        raise ValueError(
            'pyrocko.util.diff_fd: '
            'Only 1st and 2sd derivatives are supported.')

    by_order = dispatch[n]
    if order not in by_order:
        raise ValueError(
            'pyrocko.util.diff_fd: '
            'Order %i is not supported for %s derivative. Supported: %s' % (
                order, ['', '1st', '2nd'][n],
                ', '.join('%i' % order for order in sorted(by_order.keys()))))

    return by_order[order](dt, data)

1058 

1059 

class GlobalVars(object):
    '''
    Module-level mutable state: caches and lookup tables.
    '''
    reuse_store = dict()            # cache used by reuse machinery elsewhere in this module — verify
    decitab_nmax = 0                # largest factor covered by decitab (see mk_decitab)
    decitab = {}                    # decimation factor -> sequence of small factors
    decimate_fir_coeffs = {}        # cached FIR filter coefficients
    decimate_fir_remez_coeffs = {}  # cached Remez FIR filter coefficients
    decimate_iir_coeffs = {}        # cached IIR (Chebyshev) filter coefficients
    re_frac = None                  # lazily compiled regex — used elsewhere in this module

1068 

1069 

def decimate_coeffs(q, n=None, ftype='iir'):
    '''
    Get (and cache) filter coefficients for :py:func:`decimate`.

    :param q: downsampling factor
    :param n: filter order; default depends on *ftype*
    :param ftype: ``'iir'``, ``'fir'`` or ``'fir-remez'``

    :returns: tuple ``(b, a, n)`` with numerator/denominator coefficients
        and the filter order actually used
    '''

    q = int(q)

    if n is None:
        # Default filter orders per filter type.
        n = {'fir': 30, 'fir-remez': 40*q}.get(ftype, 8)

    if ftype == 'fir':
        cache = GlobalVars.decimate_fir_coeffs
        key = (n, 1./q)
        if key not in cache:
            cache[key] = signal.firwin(n+1, .75/q, window='hamming')

        return cache[key], [1.], n

    elif ftype == 'fir-remez':
        cache = GlobalVars.decimate_fir_remez_coeffs
        key = (n, 1./q)
        if key not in cache:
            # NOTE(review): newer SciPy versions removed the `Hz` keyword
            # of signal.remez in favour of `fs` — verify against the SciPy
            # version in use.
            cache[key] = signal.remez(
                n+1, (0., .75/q, 1./q, 1.),
                (1., 0.), Hz=2, weight=(1, 50))

        return cache[key], [1.], n

    else:
        cache = GlobalVars.decimate_iir_coeffs
        key = (n, 0.05, 0.8/q)
        if key not in cache:
            cache[key] = signal.cheby1(n, 0.05, 0.8/q)

        b, a = cache[key]
        return b, a, n

1106 

1107 

def decimate(x, q, n=None, ftype='iir', zi=None, ioff=0):
    '''
    Downsample the signal x by an integer factor q, using an order n filter

    By default, an order 8 Chebyshev type I filter is used or a 30 point FIR
    filter with hamming window if ftype is 'fir'.

    :param x: the signal to be downsampled (1D :class:`numpy.ndarray`)
    :param q: the downsampling factor
    :param n: order of the filter (1 less than the length of the filter for a
        `fir` filter)
    :param ftype: type of the filter; can be `iir`, `fir` or `fir-remez`
    :param zi: initial filter state; pass ``True`` or an array to also get
        the final filter state back
    :param ioff: index offset of the first retained sample

    :returns: the downsampled signal (1D :class:`numpy.ndarray`), or a
        tuple ``(y, zf)`` including the final filter state if ``zi`` was
        given
    '''

    b, a, n = decimate_coeffs(q, n, ftype)

    # Start from a zero state unless an explicit state array was passed.
    if zi is None or zi is True:
        state = num.zeros(max(len(a), len(b))-1, dtype=float)
    else:
        state = zi

    y, zf = signal.lfilter(b, a, x, zi=state)

    # Compensate the filter delay of n//2 samples, then pick every q-th.
    yd = y[n//2+ioff::q].copy()

    if zi is not None:
        return yd, zf

    return yd

1138 

1139 

class UnavailableDecimation(Exception):
    '''
    Exception raised by :py:func:`decitab` for unavailable decimation factors.
    '''

1146 

1147 

def gcd(a, b, epsilon=1e-7):
    '''
    Greatest common divisor.

    Euclid's algorithm; the relative tolerance ``epsilon`` makes it
    terminate for floating point inputs as well.
    '''

    remainder = b
    while remainder > epsilon*a:
        a, remainder = remainder, a % remainder

    return a

1157 

1158 

def lcm(a, b):
    '''
    Least common multiple.
    '''

    divisor = gcd(a, b)
    return a*b // divisor

1165 

1166 

def mk_decitab(nmax=100):
    '''
    Make table with decimation sequences.

    Decimation from one sampling rate to a lower one is achieved by a
    successive application of :py:func:`decimation` with small integer
    downsampling factors (because using large downampling factors can make the
    decimation unstable or slow). This function sets up a table with downsample
    sequences for factors up to ``nmax``.
    '''

    # Enumerate non-increasing 5-tuples (i >= j >= k >= l_ >= m) of
    # single-stage factors 1..9. The first decomposition found for a given
    # product wins, so the loop order determines which sequence is stored —
    # do not reorder these loops.
    tab = GlobalVars.decitab
    for i in range(1, 10):
        for j in range(1, i+1):
            for k in range(1, j+1):
                for l_ in range(1, k+1):
                    for m in range(1, l_+1):
                        p = i*j*k*l_*m
                        if p > nmax:
                            break
                        if p not in tab:
                            tab[p] = (i, j, k, l_, m)
                    if i*j*k*l_ > nmax:
                        break
                if i*j*k > nmax:
                    break
            if i*j > nmax:
                break
        if i > nmax:
            break

    GlobalVars.decitab_nmax = nmax

1199 

1200 

def zfmt(n):
    # Zero-padded integer format wide enough for values 0..n-1,
    # e.g. zfmt(100) -> '%02i'.
    width = int(math.log10(n - 1)) + 1
    return '%%0%ii' % width

1203 

1204 

def _year_to_time(year):
    # Timestamp of January 1st, 00:00:00 UTC of ``year``.
    tt = (year, 1, 1, 0, 0, 0)
    return to_time_float(calendar.timegm(tt))

1208 

1209 

def _working_year(year):
    '''
    Check whether the system's time conversion works for ``year``.

    Round-trips January 1st of the year through :py:func:`calendar.timegm`,
    :py:func:`time.gmtime`/:py:func:`time.strftime` and Pyrocko's own
    string conversions; any mismatch or exception means the year is not
    usable on this platform.
    '''

    try:
        tt = (year, 1, 1, 0, 0, 0)
        t = calendar.timegm(tt)
        # timegm -> gmtime must reproduce the broken-down input.
        tt2_ = time.gmtime(t)
        tt2 = tuple(tt2_)[:6]
        if tt != tt2:
            return False

        # strftime must render the expected date string.
        s = '%i-01-01 00:00:00' % year
        s2 = time.strftime('%Y-%m-%d %H:%M:%S', tt2_)
        if s != s2:
            return False

        # Pyrocko's str_to_time/time_to_str must round-trip as well.
        t3 = str_to_time(s2, format='%Y-%m-%d %H:%M:%S')
        s3 = time_to_str(t3, format='%Y-%m-%d %H:%M:%S')
        if s3 != s2:
            return False

    except Exception:
        return False

    return True

1233 

1234 

def working_system_time_range(year_min_lim=None, year_max_lim=None):
    '''
    Check time range supported by the systems's time conversion functions.

    Returns system time stamps of start of year of first/last fully supported
    year span. If this is before 1900 or after 2100, return first/last century
    which is fully supported.

    :returns: ``(tmin, tmax, year_min, year_max)``
    '''

    year0 = 2000
    year_min = year0
    year_max = year0

    # Offsets to probe: every year within +-100 years of 2000, then in
    # century steps further out.
    itests = list(range(101))
    for i in range(19):
        itests.append(200 + i*100)

    # Walk backwards until a year fails or the given lower limit is hit.
    for i in itests:
        year = year0 - i
        if year_min_lim is not None and year < year_min_lim:
            year_min = year_min_lim
            break
        elif not _working_year(year):
            break
        else:
            year_min = year

    # Walk forwards; ``year + 1`` is tested so that the whole span of
    # ``year`` is known to be covered.
    for i in itests:
        year = year0 + i
        if year_max_lim is not None and year > year_max_lim:
            year_max = year_max_lim
            break
        elif not _working_year(year + 1):
            break
        else:
            year_max = year

    return (
        _year_to_time(year_min),
        _year_to_time(year_max),
        year_min, year_max)

1278 

1279 

# Cache for get_working_system_time_range(), filled on first use.
g_working_system_time_range = None

1281 

1282 

def get_working_system_time_range():
    '''
    Caching variant of :py:func:`working_system_time_range`.
    '''

    global g_working_system_time_range

    # The probe is expensive, so run it only once per process.
    if g_working_system_time_range is None:
        g_working_system_time_range = working_system_time_range()

    return g_working_system_time_range

1293 

1294 

def is_working_time(t):
    # True if ``t`` falls into the time range supported by the system's
    # time conversion functions.
    tmin, tmax, _, _ = get_working_system_time_range()
    return tmin <= t <= tmax

1298 

1299 

def julian_day_of_year(timestamp):
    '''
    Get the day number after the 1st of January of year in ``timestamp``.

    :returns: day number as int
    '''

    broken = time.gmtime(int(timestamp))
    return broken.tm_yday

1308 

1309 

def hour_start(timestamp):
    '''
    Get beginning of hour for any point in time.

    :param timestamp: time instant as system timestamp (in seconds)

    :returns: instant of hour start as system timestamp
    '''

    broken = time.gmtime(int(timestamp))
    # Keep year..hour, zero out minutes and seconds.
    truncated = tuple(broken)[:4] + (0, 0)
    return to_time_float(calendar.timegm(truncated))

1322 

1323 

def day_start(timestamp):
    '''
    Get beginning of day for any point in time.

    :param timestamp: time instant as system timestamp (in seconds)

    :returns: instant of day start as system timestamp
    '''

    broken = time.gmtime(int(timestamp))
    # Keep year, month and day, zero out the time of day.
    truncated = tuple(broken)[:3] + (0, 0, 0)
    return to_time_float(calendar.timegm(truncated))

1336 

1337 

def month_start(timestamp):
    '''
    Get beginning of month for any point in time.

    :param timestamp: time instant as system timestamp (in seconds)

    :returns: instant of month start as system timestamp
    '''

    tt = time.gmtime(int(timestamp))
    # Keep year and month, reset day to the 1st and time of day to zero.
    tts = tt[0:2] + (1, 0, 0, 0)
    return to_time_float(calendar.timegm(tts))

1350 

1351 

def year_start(timestamp):
    '''
    Get beginning of year for any point in time.

    :param timestamp: time instant as system timestamp (in seconds)

    :returns: instant of year start as system timestamp
    '''

    tt = time.gmtime(int(timestamp))
    # Keep the year, reset to January 1st, 00:00:00.
    tts = tt[0:1] + (1, 1, 0, 0, 0)
    return to_time_float(calendar.timegm(tts))

1364 

1365 

def iter_days(tmin, tmax):
    '''
    Yields begin and end of days until given time span is covered.

    :param tmin,tmax: input time span

    :yields: tuples with (begin, end) of days as system timestamps
    '''

    tbeg = day_start(tmin)
    while tbeg < tmax:
        # 26 h past the day's start always lands in the following day.
        tnext = day_start(tbeg + 26*60*60)
        yield tbeg, tnext
        tbeg = tnext

1380 

1381 

def iter_months(tmin, tmax):
    '''
    Yields begin and end of months until given time span is covered.

    :param tmin,tmax: input time span

    :yields: tuples with (begin, end) of months as system timestamps
    '''

    tbeg = month_start(tmin)
    while tbeg < tmax:
        # 33 days past the month's start always lands in the next month.
        tnext = month_start(tbeg + 24*60*60*33)
        yield tbeg, tnext
        tbeg = tnext

1396 

1397 

def iter_years(tmin, tmax):
    '''
    Yields begin and end of years until given time span is covered.

    :param tmin,tmax: input time span

    :yields: tuples with (begin, end) of years as system timestamps
    '''

    tbeg = year_start(tmin)
    while tbeg < tmax:
        # 369 days past the year's start always lands in the next year.
        tnext = year_start(tbeg + 24*60*60*369)
        yield tbeg, tnext
        tbeg = tnext

1412 

1413 

def today():
    # Start of the current day (UTC) as system timestamp.
    return day_start(time.time())

1416 

1417 

def tomorrow():
    # Start of the next day (UTC) as system timestamp.
    return day_start(time.time() + 24*60*60)

1420 

1421 

def decitab(n):
    '''
    Get integer decimation sequence for given downampling factor.

    :param n: target decimation factor

    :returns: tuple with downsampling sequence
    '''

    # Grow the table on demand; the factor-2 headroom avoids frequent
    # rebuilding for slowly increasing factors.
    if n > GlobalVars.decitab_nmax:
        mk_decitab(n*2)
    if n not in GlobalVars.decitab:
        raise UnavailableDecimation('ratio = %g' % n)
    return GlobalVars.decitab[n]

1436 

1437 

def ctimegm(s, format="%Y-%m-%d %H:%M:%S"):
    '''
    Convert string representing UTC time to system time.

    :param s: string to be interpreted
    :param format: format string passed to :py:func:`strptime`

    :returns: system time stamp

    Interpretes string with format ``'%Y-%m-%d %H:%M:%S'``, using strptime.

    .. note::
        This function is to be replaced by :py:func:`str_to_time`.
    '''

    broken = time.strptime(s, format)
    return calendar.timegm(broken)

1454 

1455 

def gmctime(t, format="%Y-%m-%d %H:%M:%S"):
    '''
    Get string representation from system time, UTC.

    Produces string with format ``'%Y-%m-%d %H:%M:%S'``, using strftime.

    .. note::
        This function is to be repaced by :py:func:`time_to_str`.
    '''

    broken = time.gmtime(t)
    return time.strftime(format, broken)

1467 

1468 

def gmctime_v(t, format="%a, %d %b %Y %H:%M:%S"):
    '''
    Get string representation from system time, UTC. Same as
    :py:func:`gmctime` but with a more verbose default format.

    .. note::
        This function is to be replaced by :py:func:`time_to_str`.
    '''

    broken = time.gmtime(t)
    return time.strftime(format, broken)

1479 

1480 

def gmctime_fn(t, format="%Y-%m-%d_%H-%M-%S"):
    '''
    Get string representation from system time, UTC. Same as
    :py:func:`gmctime` but with a default usable in filenames.

    .. note::
        This function is to be replaced by :py:func:`time_to_str`.
    '''

    broken = time.gmtime(t)
    return time.strftime(format, broken)

1491 

1492 

class TimeStrError(Exception):
    '''
    Base exception for errors in time string parsing and formatting.
    '''

1495 

1496 

class FractionalSecondsMissing(TimeStrError):
    '''
    Exception raised by :py:func:`str_to_time` when the given string lacks
    fractional seconds.
    '''

1504 

1505 

class FractionalSecondsWrongNumberOfDigits(TimeStrError):
    '''
    Exception raised by :py:func:`str_to_time` when the given string has an
    incorrect number of digits in the fractional seconds part.
    '''

1513 

1514 

1515def _endswith_n(s, endings): 

1516 for ix, x in enumerate(endings): 

1517 if s.endswith(x): 

1518 return ix 

1519 return -1 

1520 

1521 

def str_to_time(s, format='%Y-%m-%d %H:%M:%S.OPTFRAC'):
    '''
    Convert string representing UTC time to floating point system time.

    :param s: string representing UTC time
    :param format: time string format
    :returns: system time stamp as floating point value

    Uses the semantics of :py:func:`time.strptime` but allows for fractional
    seconds. If the format ends with ``'.FRAC'``, anything after a dot is
    interpreted as fractional seconds. If the format ends with ``'.OPTFRAC'``,
    the fractional part, including the dot is made optional. The latter has the
    consequence, that the time strings and the format may not contain any other
    dots. If the format ends with ``'.xFRAC'`` where x is 1, 2, or 3, it is
    ensured, that exactly that number of digits are present in the fractional
    seconds.
    '''

    time_float = get_time_float()

    # Fast path: delegate to the C extension when available.
    if util_ext is not None:
        try:
            t, tfrac = util_ext.stt(s, format)
        except util_ext.UtilExtError as e:
            raise TimeStrError(
                '%s, string=%s, format=%s' % (str(e), s, format))

        return time_float(t)+tfrac

    # Pure-Python fallback: split off the fractional-seconds part, then
    # parse the rest with time.strptime.
    fracsec = 0.
    fixed_endings = '.FRAC', '.1FRAC', '.2FRAC', '.3FRAC'

    iend = _endswith_n(format, fixed_endings)
    if iend != -1:
        # Mandatory fractional part.
        dotpos = s.rfind('.')
        if dotpos == -1:
            raise FractionalSecondsMissing(
                'string=%s, format=%s' % (s, format))

        # For '.xFRAC', exactly x fractional digits must be present.
        if iend > 0 and iend != (len(s)-dotpos-1):
            raise FractionalSecondsWrongNumberOfDigits(
                'string=%s, format=%s' % (s, format))

        format = format[:-len(fixed_endings[iend])]
        fracsec = float(s[dotpos:])
        s = s[:dotpos]

    elif format.endswith('.OPTFRAC'):
        # Optional fractional part: consume it only if present and
        # non-empty after the dot.
        dotpos = s.rfind('.')
        format = format[:-8]
        if dotpos != -1 and len(s[dotpos:]) > 1:
            fracsec = float(s[dotpos:])

        if dotpos != -1:
            s = s[:dotpos]

    try:
        return time_float(calendar.timegm(time.strptime(s, format))) \
            + fracsec
    except ValueError as e:
        raise TimeStrError('%s, string=%s, format=%s' % (str(e), s, format))

1583 

1584 

stt = str_to_time  # short alias, mirroring the C extension's naming

1586 

1587 

def str_to_time_fillup(s):
    '''
    Default :py:func:`str_to_time` with filling in of missing values.

    Allows e.g. `'2010-01-01 00:00:00'` as `'2010-01-01 00:00'`,
    `'2010-01-01 00'`, ..., or `'2010'`.
    '''

    # Lengths 4, 7, 10, 13 and 16 correspond to truncation after year,
    # month, day, hour and minute; complete from the template.
    if len(s) in (4, 7, 10, 13, 16):
        s += '0000-01-01 00:00:00'[len(s):]

    return str_to_time(s)

1600 

1601 

def time_to_str(t, format='%Y-%m-%d %H:%M:%S.3FRAC'):
    '''
    Get string representation for floating point system time.

    :param t: floating point system time
    :param format: time string format
    :returns: string representing UTC time

    Uses the semantics of :py:func:`time.strftime` but additionally allows for
    fractional seconds. If ``format`` contains ``'.xFRAC'``, where ``x`` is a
    digit between 1 and 9, this is replaced with the fractional part of ``t``
    with ``x`` digits precision.
    '''

    # In "grumpy" mode, complain about timestamps of the wrong float class.
    if pyrocko.grumpy > 0:
        check_time_class(t, 'warn' if pyrocko.grumpy == 1 else 'raise')

    # Convenience: an integer format means "default format with that many
    # fractional digits" (non-positive meaning none).
    if isinstance(format, int):
        if format > 0:
            format = '%Y-%m-%d %H:%M:%S.' + '%iFRAC' % format
        else:
            format = '%Y-%m-%d %H:%M:%S'

    # Fast path: delegate to the C extension when available.
    if util_ext is not None:
        t0 = num.floor(t)
        try:
            return util_ext.tts(int(t0), float(t - t0), format)
        except util_ext.UtilExtError as e:
            raise TimeStrError(
                '%s, timestamp=%f, format=%s' % (str(e), t, format))

    # Pure-Python fallback: lazily compile the '.xFRAC' pattern and the
    # matching printf formats.
    if not GlobalVars.re_frac:
        GlobalVars.re_frac = re.compile(r'\.[1-9]FRAC')
        GlobalVars.frac_formats = dict(
            [('.%sFRAC' % x, '%.'+x+'f') for x in '123456789'])

    ts = float(num.floor(t))
    tfrac = t-ts

    m = GlobalVars.re_frac.search(format)
    if m:
        sfrac = (GlobalVars.frac_formats[m.group(0)] % tfrac)
        # Rounding of the fractional part may carry into the integer
        # seconds (e.g. 0.9996 with 3 digits formats as '1.000').
        if sfrac[0] == '1':
            ts += 1.

        format, nsub = GlobalVars.re_frac.subn(sfrac[1:], format, 1)

    return time.strftime(format, time.gmtime(ts))

1650 

1651 

tts = time_to_str  # short alias, mirroring the C extension's naming

1653 

1654 

def mystrftime(fmt=None, tt=None, milliseconds=0):
    '''
    Format broken-down time with extra sub-second placeholders.

    ``%r`` is replaced by milliseconds (3 digits), ``%u`` by microseconds
    (6 digits) and ``%n`` by nanoseconds (9 digits); the remainder of the
    format is handed to :py:func:`time.strftime`.
    '''

    if fmt is None:
        fmt = '%Y-%m-%d %H:%M:%S .%r'

    if tt is None:
        tt = time.time()

    substitutions = (
        ('%r', '%03i' % int(round(milliseconds))),
        ('%u', '%06i' % int(round(milliseconds*1000))),
        ('%n', '%09i' % int(round(milliseconds*1000000))))

    for placeholder, text in substitutions:
        fmt = fmt.replace(placeholder, text)

    return time.strftime(fmt, tt)

1667 

1668 

def gmtime_x(timestamp):
    # Split a float timestamp into broken-down UTC time plus milliseconds.
    whole = float(num.floor(timestamp))
    milliseconds = (timestamp - whole)*1000
    return time.gmtime(whole), milliseconds

1674 

1675 

def plural_s(n):
    # Accept either a count or a sized collection; pluralize unless exactly 1.
    count = n if isinstance(n, int) else len(n)
    return '' if count == 1 else 's'

1681 

1682 

def ensuredirs(dst):
    '''
    Create all intermediate path components for a target path.

    :param dst: target path

    The leaf part of the target path is not created (use :py:func:`ensuredir`
    if the target path is a directory to be created).
    '''

    # Collect missing ancestor directories, deepest first.
    missing = []
    parent, _ = os.path.split(dst.rstrip(os.sep))
    while parent and not os.path.exists(parent):
        missing.append(parent)
        parent, _ = os.path.split(parent)

    # Create them top-down, tolerating races where another process created
    # a directory in the meantime.
    for path in reversed(missing):
        try:
            os.mkdir(path)
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise

1707 

1708 

def ensuredir(dst):
    '''
    Create directory and all intermediate path components to it as needed.

    :param dst: directory name

    Nothing is done if the given target already exists.
    '''

    if os.path.exists(dst):
        return

    # Fix: the result of rstrip() was previously discarded (strings are
    # immutable), so trailing path separators were never actually removed.
    dst = dst.rstrip(os.sep)

    ensuredirs(dst)
    try:
        os.mkdir(dst)
    except OSError as e:
        # Tolerate a race where another process created the directory.
        if not e.errno == errno.EEXIST:
            raise

1729 

1730 

def reuse(x):
    '''
    Get unique instance of an object.

    :param x: hashable object
    :returns: reference to x or an equivalent object

    Cache object ``x`` in a global dict for reuse, or if x already
    is in that dict, return a reference to it.
    '''

    # setdefault inserts x on first sight and returns the cached instance.
    return GlobalVars.reuse_store.setdefault(x, x)

1746 

1747 

def deuse(x):
    # Drop ``x`` from the global reuse cache, if present.
    GlobalVars.reuse_store.pop(x, None)

1752 

1753 

class Anon(object):
    '''
    Dict-to-object utility.

    Any given keyword arguments are stored as attributes.

    Example::

        a = Anon(x=1, y=2)
        print(a.x, a.y)
    '''

    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)

1769 

1770 

def iter_select_files(
        paths,
        include=None,
        exclude=None,
        selector=None,
        show_progress=True,
        pass_through=None):

    '''
    Recursively select files (generator variant).

    See :py:func:`select_files`.
    '''

    if show_progress:
        progress_beg('selecting files...')

    ngood = 0
    # Build the path filter from include/exclude regexes and the optional
    # selector callback.
    check_include = None
    if include is not None:
        rinclude = re.compile(include)

        def check_include(path):
            m = rinclude.search(path)
            if not m:
                return False

            if selector is None:
                return True

            # Named groups of the include regex become attributes of the
            # object passed to the selector callback.
            infos = Anon(**m.groupdict())
            return selector(infos)

    check_exclude = None
    if exclude is not None:
        rexclude = re.compile(exclude)

        def check_exclude(path):
            return not bool(rexclude.search(path))

    # Combine the two filters into a single predicate (or None for "accept
    # everything").
    if check_include and check_exclude:

        def check(path):
            return check_include(path) and check_exclude(path)

    elif check_include:
        check = check_include

    elif check_exclude:
        check = check_exclude

    else:
        check = None

    if isinstance(paths, str):
        paths = [paths]

    for path in paths:
        if pass_through and pass_through(path):
            # NOTE(review): pass-through paths are yielded as-is (not
            # abspath'ed) and do not increment ngood — confirm intended.
            if check is None or check(path):
                yield path

        elif os.path.isdir(path):
            # Walk directories in sorted order for reproducible output.
            for (dirpath, dirnames, filenames) in os.walk(path):
                dirnames.sort()
                filenames.sort()
                for filename in filenames:
                    path = op.join(dirpath, filename)
                    if check is None or check(path):
                        yield os.path.abspath(path)
                        ngood += 1
        else:
            if check is None or check(path):
                yield os.path.abspath(path)
                ngood += 1

    if show_progress:
        progress_end('%i file%s selected.' % (ngood, plural_s(ngood)))

1849 

1850 

def select_files(
        paths, include=None, exclude=None, selector=None, show_progress=True,
        regex=None, pass_through=None):

    '''
    Recursively select files.

    :param paths: entry path names
    :param include: pattern for conditional inclusion
    :param exclude: pattern for conditional exclusion
    :param selector: callback for conditional inclusion
    :param show_progress: if True, indicate start and stop of processing
    :param regex: alias for ``include`` (backwards compatibility)
    :param pass_through: callback; paths for which it returns ``True`` are
        yielded as-is without directory recursion (see
        :py:func:`iter_select_files`)
    :returns: list of path names

    Recursively finds all files under given entry points ``paths``. If
    parameter ``include`` is a regular expression, only files with matching
    path names are included. If additionally parameter ``selector`` is given a
    callback function, only files for which the callback returns ``True`` are
    included. The callback should take a single argument. The callback is
    called with a single argument, an object, having as attributes, any named
    groups given in ``include``.

    Examples

    To find all files ending in ``'.mseed'`` or ``'.msd'``::

        select_files(paths,
            include=r'\\.(mseed|msd)$')

    To find all files ending with ``'$Year.$DayOfYear'``, having set 2009 for
    the year::

        select_files(paths,
            include=r'(?P<year>\\d\\d\\d\\d)\\.(?P<doy>\\d\\d\\d)$',
            selector=(lambda x: int(x.year) == 2009))
    '''
    if regex is not None:
        assert include is None
        include = regex

    # Forward pass_through too, so this wrapper exposes the full
    # functionality of iter_select_files (it was previously dropped).
    return list(iter_select_files(
        paths, include, exclude, selector, show_progress, pass_through))

1894 

1895 

def base36encode(number, alphabet='0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'):
    '''
    Convert positive integer to a base36 string.

    :param number: non-negative integer to encode
    :param alphabet: digit characters used for the encoding
    :returns: base36 string representation
    :raises TypeError: if ``number`` is not an integer
    :raises ValueError: if ``number`` is negative
    '''

    # Fix: the old check against the Python 2 ``long`` type raised
    # NameError on Python 3; ``int`` covers all integers there.
    if not isinstance(number, int):
        raise TypeError('number must be an integer')
    if number < 0:
        raise ValueError('number must be positive')

    # Special case for small numbers
    if number < 36:
        return alphabet[number]

    base36 = ''
    while number != 0:
        number, i = divmod(number, 36)
        base36 = alphabet[i] + base36

    return base36

1916 

1917 

def base36decode(number):
    '''
    Decode base36 endcoded positive integer.
    '''

    # int() natively parses base-36 digit strings (case-insensitive).
    return int(number, 36)

1924 

1925 

class UnpackError(Exception):
    '''
    Exception raised when :py:func:`unpack_fixed` encounters an error.
    '''

1932 

1933 

# Two-line column ruler (tens digits over ones digits, 80 columns wide),
# prepended to UnpackError messages to pinpoint offending columns.
ruler = ''.join(['%-10i' % i for i in range(8)]) \
    + '\n' + '0123456789' * 8 + '\n'

1936 

1937 

def unpack_fixed(format, line, *callargs):
    '''
    Unpack fixed format string, as produced by many fortran codes.

    :param format: format specification
    :param line: string to be processed
    :param callargs: callbacks for callback fields in the format

    The format is described by a string of comma-separated fields. Each field
    is defined by a character for the field type followed by the field width. A
    questionmark may be appended to the field description to allow the argument
    to be optional (The data string is then allowed to be filled with blanks
    and ``None`` is returned in this case).

    The following field types are available:

    ==== ================================================================
    Type Description
    ==== ================================================================
    A    string (full field width is extracted)
    a    string (whitespace at the beginning and the end is removed)
    i    integer value
    f    floating point value
    @    special type, a callback must be given for the conversion
    x    special field type to skip parts of the string
    ==== ================================================================
    '''

    ipos = 0
    values = []
    icall = 0
    for form in format.split(','):
        form = form.strip()
        # Trailing '?' marks the field as optional.
        optional = form[-1] == '?'
        form = form.rstrip('?')
        typ = form[0]
        ln = int(form[1:])
        # Slice of the input line covered by this field.
        s = line[ipos:ipos+ln]
        cast = {
            'x': None,
            'A': str,
            'a': lambda x: x.strip(),
            'i': int,
            'f': float,
            '@': 'extra'}[typ]

        # '@' fields consume the next user-supplied callback.
        if cast == 'extra':
            cast = callargs[icall]
            icall += 1

        if cast is not None:
            if optional and s.strip() == '':
                values.append(None)
            else:
                try:
                    values.append(cast(s))
                except Exception:
                    # Build a caret marker under a column ruler to point at
                    # the offending field in the error message.
                    mark = [' '] * 80
                    mark[ipos:ipos+ln] = ['^'] * ln
                    mark = ''.join(mark)
                    raise UnpackError(
                        'Invalid cast to type "%s" at position [%i:%i] of '
                        'line: \n%s%s\n%s' % (
                            typ, ipos, ipos+ln, ruler, line.rstrip(), mark))

        ipos += ln

    return values

2006 

2007 

# Cache of compiled glob patterns used by _nslc_pattern().
_pattern_cache = {}

2009 

2010 

def _nslc_pattern(pattern):
    # Translate the shell glob to a case-insensitive regex, compiling each
    # distinct pattern only once.
    try:
        return _pattern_cache[pattern]
    except KeyError:
        rpattern = re.compile(fnmatch.translate(pattern), re.I)
        _pattern_cache[pattern] = rpattern
        return rpattern

2019 

2020 

def match_nslc(patterns, nslc):
    '''
    Match network-station-location-channel code against pattern or list of
    patterns.

    :param patterns: pattern or list of patterns
    :param nslc: tuple with (network, station, location, channel) as strings

    :returns: ``True`` if the pattern matches or if any of the given patterns
        match; or ``False``.

    The patterns may contain shell-style wildcards: \\*, ?, [seq], [!seq].

    Example::

        match_nslc('*.HAM3.*.BH?', ('GR', 'HAM3', '', 'BHZ'))  # -> True
    '''

    if isinstance(patterns, str):
        patterns = [patterns]

    # Compare against the dotted string form of the codes.
    s = nslc if isinstance(nslc, str) else '.'.join(nslc)

    return any(_nslc_pattern(pattern).match(s) for pattern in patterns)

2052 

2053 

def match_nslcs(patterns, nslcs):
    '''
    Get network-station-location-channel codes that match given pattern or any
    of several given patterns.

    :param patterns: pattern or list of patterns
    :param nslcs: list of (network, station, location, channel) tuples

    See also :py:func:`match_nslc`
    '''

    return [nslc for nslc in nslcs if match_nslc(patterns, nslc)]

2071 

2072 

class Timeout(Exception):
    # Raised by create_lockfile() when the lock cannot be acquired in time.
    pass

2075 

2076 

def create_lockfile(fn, timeout=None, timewarn=10.):
    '''
    Create a lockfile, busy-waiting until exclusive creation succeeds.

    :param fn: path of the lockfile
    :param timeout: give up and raise :py:exc:`Timeout` after this many
        seconds of waiting (``None`` means wait forever)
    :param timewarn: log a warning after this many seconds of waiting; the
        warning interval doubles after each warning

    The lock is taken by creating ``fn`` with ``O_CREAT | O_EXCL``, which is
    atomic on POSIX filesystems. Release it with :py:func:`delete_lockfile`.
    '''

    t0 = time.time()

    while True:
        try:
            f = os.open(fn, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
            os.close(f)
            return

        except OSError as e:
            if e.errno == errno.EEXIST:
                tnow = time.time()

                if timeout is not None and tnow - t0 > timeout:
                    # Fixed typo in message: 'occured' -> 'occurred'.
                    raise Timeout(
                        'Timeout (%gs) occurred while waiting to get '
                        'exclusive access to: %s' % (timeout, fn))

                if timewarn is not None and tnow - t0 > timewarn:
                    logger.warning(
                        'Waiting since %gs to get exclusive access to: %s' % (
                            timewarn, fn))

                    timewarn *= 2

                time.sleep(0.01)
            else:
                raise

2105 

2106 

def delete_lockfile(fn):
    '''
    Remove a lockfile created with :py:func:`create_lockfile`.
    '''

    os.unlink(fn)

2109 

2110 

class Lockfile(Exception):
    # Context manager guarding a critical section with a filesystem
    # lockfile (see create_lockfile()/delete_lockfile()).
    # NOTE(review): inheriting from Exception looks unintentional for a
    # context manager, but the base class is kept in case callers rely
    # on it — confirm before changing.

    def __init__(self, path, timeout=5, timewarn=10.):
        # Lockfile path and patience parameters forwarded to
        # create_lockfile().
        self._path = path
        self._timeout = timeout
        self._timewarn = timewarn

    def __enter__(self):
        create_lockfile(
            self._path, timeout=self._timeout, timewarn=self._timewarn)
        return None

    def __exit__(self, type, value, traceback):
        # Always release the lock, also when the body raised.
        delete_lockfile(self._path)

2125 

2126 

class SoleError(Exception):
    '''
    Exception raised by objects of type :py:class:`Sole`, when an concurrent
    instance is running.
    '''

2134 

2135 

class Sole(object):
    '''
    Use POSIX advisory file locking to ensure that only a single instance of a
    program is running.

    :param pid_path: path to lockfile to be used

    Usage::

        from pyrocko.util import Sole, SoleError, setup_logging
        import os

        setup_logging('my_program')

        pid_path = os.path.join(os.environ['HOME'], '.my_program_lock')
        try:
            sole = Sole(pid_path)

        except SoleError as e:
            logger.fatal(str(e))
            sys.exit(1)
    '''

    def __init__(self, pid_path):
        self._pid_path = pid_path
        self._other_running = False
        ensuredirs(self._pid_path)
        self._lockfile = None
        # Keep module references for __del__: module globals may already be
        # torn down at interpreter shutdown.
        self._os = os

        if not fcntl:
            raise SoleError(
                'Python standard library module "fcntl" not available on '
                'this platform.')

        self._fcntl = fcntl

        try:
            self._lockfile = os.open(self._pid_path, os.O_CREAT | os.O_WRONLY)
        except Exception:
            raise SoleError(
                'Cannot open lockfile (path = %s)' % self._pid_path)

        try:
            # Non-blocking exclusive lock; fails if another instance holds it.
            fcntl.lockf(self._lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)

        except IOError:
            self._other_running = True
            try:
                f = open(self._pid_path, 'r')
                pid = f.read().strip()
                f.close()
            except Exception:
                pid = '?'

            raise SoleError('Other instance is running (pid = %s)' % pid)

        try:
            os.ftruncate(self._lockfile, 0)
            # Fix: os.write() requires bytes on Python 3; the former str
            # argument raised TypeError which was silently swallowed below,
            # so the pid was never actually stored.
            os.write(self._lockfile, ('%i\n' % os.getpid()).encode('ascii'))
            os.fsync(self._lockfile)

        except Exception:
            # the pid is only stored for user information, so this is allowed
            # to fail
            pass

    def __del__(self):
        if not self._other_running:
            if self._lockfile is not None:
                self._fcntl.lockf(self._lockfile, self._fcntl.LOCK_UN)
                self._os.close(self._lockfile)
            try:
                self._os.unlink(self._pid_path)
            except Exception:
                pass

2212 

2213 

# Characters which need escaping inside single-quoted table fields:
# single quote and backslash.
re_escapequotes = re.compile(r"(['\\])")


def escapequotes(s):
    '''
    Backslash-escape single quotes and backslashes in ``s``.
    '''

    return re_escapequotes.sub(r"\\\1", s)

2219 

2220 

class TableWriter(object):
    '''
    Write table of space separated values to a file.

    :param f: file like object

    Strings containing spaces are quoted on output.
    '''

    def __init__(self, f):
        self._f = f

    def writerow(self, row, minfieldwidths=None):

        '''
        Write one row of values to underlying file.

        :param row: iterable of values
        :param minfieldwidths: minimum field widths for the values

        Each value in ``row`` is converted to a string and optionally padded
        with blanks. The resulting strings are output separated with blanks.
        If any values given are strings and if they contain whitespace, they
        are quoted with single quotes, and any internal single quotes are
        backslash-escaped.
        '''

        fields = []

        for index, value in enumerate(row):
            width = 0
            if minfieldwidths and index < len(minfieldwidths):
                width = minfieldwidths[index]

            if isinstance(value, str):
                # Quote strings containing whitespace or quotes.
                if re.search(r"\s|'", value):
                    value = "'%s'" % escapequotes(value)

                fields.append(value.ljust(width))
            else:
                fields.append(str(value).rjust(width))

        self._f.write(' '.join(fields).rstrip() + '\n')

2266 

2267 

class TableReader(object):

    '''
    Read table of space separated values from a file.

    :param f: file-like object

    This uses Pythons shlex module to tokenize lines. Should deal correctly
    with quoted strings.
    '''

    def __init__(self, f):
        self._f = f
        # Set to True once the underlying file is exhausted.
        self.eof = False

    def readrow(self):
        '''
        Read one row from the underlying file, tokenize it with shlex.

        :returns: tokenized line as a list of strings.
        '''

        line = self._f.readline()
        if not line:
            self.eof = True
            return []

        # Fix: the result of line.strip() was previously discarded (a
        # no-op), so indented comment lines were not recognized.
        line = line.strip()
        if not line or line.startswith('#'):
            return []

        return qsplit(line)

2299 

2300 

def gform(number, significant_digits=3):
    '''
    Pretty print floating point numbers.

    Align floating point numbers at the decimal dot.

    ::

        |  -d.dde+xxx|
        |  -d.dde+xx |
        |-ddd.       |
        | -dd.d      |
        |  -d.dd     |
        |  -0.ddd    |
        |  -0.0ddd   |
        |  -0.00ddd  |
        |  -d.dde-xx |
        |  -d.dde-xxx|
        |         nan|


    The formatted string has length ``significant_digits * 2 + 6``.
    '''

    no_exp_lo = pow(10., -1)
    no_exp_hi = pow(10., significant_digits)
    width = significant_digits*2 + 6

    if number == 0. or no_exp_lo <= abs(number) < no_exp_hi:
        # Fixed notation; '#' keeps trailing zeros which are then stripped
        # so that only the decimal dot remains for alignment.
        out = ('%#.*g' % (significant_digits, number)).rstrip('0')
    else:
        out = '%.*E' % (significant_digits-1, number)

    # Pad on the left so the decimal dots line up, then pad to full width.
    out = (' '*(significant_digits + 1 - out.find('.')) + out).ljust(width)
    if out.strip().lower() == 'nan':
        out = 'nan'.rjust(width)
    return out

2337 

2338 

def human_bytesize(value):
    '''
    Convert a byte count into a human-readable string, e.g. ``'2.5 kB'``.

    Uses decimal (1000-based) unit prefixes.
    '''

    units = 'Bytes kB MB GB TB PB EB ZB YB'.split()

    if value == 1:
        return '1 Byte'

    for exponent, unit in enumerate(units):
        scaled = float(value) / 1000**exponent
        # One decimal place for small magnitudes (except plain byte counts).
        if round(scaled) < 10. and not value < 1000:
            return '%.1f %s' % (scaled, unit)
        if round(scaled) < 1000.:
            return '%.0f %s' % (scaled, unit)

    return '%i Bytes' % value

2354 

2355 

# Matches old-style YAML type tags of pyrocko modules which were later
# renamed/moved; used by pf_is_old() and pf_upgrade() below.
re_compatibility = re.compile(
    r'!pyrocko\.(trace|gf\.(meta|seismosizer)|fomosto\.' +
    r'(dummy|poel|qseis|qssp))\.'
)

2360 

2361 

def pf_is_old(fn):
    '''
    Check whether file ``fn`` contains old-style Pyrocko YAML type tags.

    :param fn: path of file to check
    :returns: ``True`` if an old-style tag is found, ``False`` otherwise
    '''
    with open(fn, 'r') as f:
        for line in f:
            if re_compatibility.search(line):
                # One match is enough - no need to scan the rest of the file.
                return True

    return False

2370 

2371 

def pf_upgrade(fn):
    '''
    Rewrite old-style Pyrocko YAML type tags in file ``fn`` in place.

    :param fn: path of file to upgrade
    :returns: ``True`` if the file needed (and received) an upgrade
    '''
    if not pf_is_old(fn):
        return False

    fn_temp = fn + '.temp'

    # Write the converted content to a temporary file, then atomically
    # replace the original.
    with open(fn, 'r') as fin, open(fn_temp, 'w') as fout:
        for line in fin:
            fout.write(re_compatibility.sub('!pf.', line))

    os.rename(fn_temp, fn)
    return True

2386 

2387 

def read_leap_seconds(tzfile='/usr/share/zoneinfo/right/UTC'):
    '''
    Extract leap second information from tzdata.

    :param tzfile: path to a leap-second aware ("right") timezone file
    :returns: list of ``(timestamp, leap_second_count)`` tuples

    See also 'man 5 tzfile'.
    '''

    from struct import unpack, calcsize

    leaps = []
    with open(tzfile, 'rb') as f:
        # Parse the fixed-size tzfile header (big-endian).
        header_fmt = '>4s c 15x 6l'
        magic, _version, _ttisgmtcnt, _ttisstdcnt, leapcnt, timecnt, \
            typecnt, charcnt = unpack(header_fmt, f.read(calcsize(header_fmt)))

        assert magic == b'TZif', 'Not a timezone file'

        # Skip transition times, transition types, ttinfo structures and the
        # abbreviation strings - only the leap second records are of interest.
        skip_fmt = '>%il %iB %s %is' % (
            timecnt, timecnt, 'lBB'*typecnt, charcnt)
        f.read(calcsize(skip_fmt))

        # Each leap second record holds the transition time and the total
        # number of leap seconds in effect after it.
        record_fmt = '>2l'
        record_size = calcsize(record_fmt)
        for _ in range(leapcnt):
            tleap, nleap = unpack(record_fmt, f.read(record_size))
            leaps.append((tleap - nleap + 1, nleap))

    return leaps

2419 

2420 

class LeapSecondsError(Exception):
    '''
    Base class for errors related to leap second handling.
    '''
    pass

2423 

2424 

class LeapSecondsOutdated(LeapSecondsError):
    '''
    Raised when the leap seconds file is missing or past its expiry date.
    '''
    pass

2427 

2428 

class InvalidLeapSecondsFile(LeapSecondsOutdated):
    '''
    Raised when the leap seconds file content is invalid (e.g. an HTML error
    page was downloaded instead of the list).
    '''
    pass

2431 

2432 

def parse_leap_seconds_list(fn):
    '''
    Parse a ``leap-seconds.list`` file.

    :param fn: path to the leap seconds list file
    :returns: list of ``(timestamp, offset)`` tuples, where ``timestamp`` is
        the system time at which the leap second takes effect and ``offset``
        is the value from the file's second column minus 10

    :raises LeapSecondsOutdated: if the file is missing or its expiry date
        has passed
    :raises InvalidLeapSecondsFile: if the content looks like HTML
    :raises LeapSecondsError: if the file cannot be read
    '''
    data = []
    texpires = None
    try:
        # Times in the file are given as seconds since 1900-01-01 00:00:00.
        t0 = int(round(str_to_time('1900-01-01 00:00:00')))
    except TimeStrError:
        # Fall back via the 1970 epoch when 1900 cannot be represented in
        # low precision mode; 2208988800 s separate the two epochs.
        t0 = int(round(str_to_time('1970-01-01 00:00:00'))) - 2208988800

    tnow = int(round(time.time()))

    if not op.exists(fn):
        raise LeapSecondsOutdated('no leap seconds file found')

    try:
        with open(fn, 'rb') as f:
            for line in f:
                if line.strip().startswith(b'<!DOCTYPE'):
                    # HTML content means an error page was downloaded.
                    raise InvalidLeapSecondsFile('invalid leap seconds file')

                if line.startswith(b'#@'):
                    # The '#@' line carries the file's expiration date.
                    texpires = int(line.split()[1]) + t0
                elif line.startswith(b'#') or len(line) < 5:
                    # Comment or too-short line - skip.
                    pass
                else:
                    toks = line.split()
                    t = int(toks[0]) + t0
                    nleap = int(toks[1]) - 10
                    data.append((t, nleap))

    except IOError:
        raise LeapSecondsError('cannot read leap seconds file %s' % fn)

    if texpires is None or tnow > texpires:
        raise LeapSecondsOutdated('leap seconds list is outdated')

    return data

2469 

2470 

def read_leap_seconds2():
    '''
    Get list of leap seconds, updating the local file from the configured
    URL if it is missing or outdated.

    :returns: list of ``(timestamp, offset)`` tuples as produced by
        :py:func:`parse_leap_seconds_list`
    :raises LeapSecondsError: if the list can neither be read nor updated
    '''
    from pyrocko import config
    conf = config.config()
    fn = conf.leapseconds_path
    url = conf.leapseconds_url
    # check for outdated default URL
    if url == 'http://www.ietf.org/timezones/data/leap-seconds.list':
        url = 'https://www.ietf.org/timezones/data/leap-seconds.list'
        logger.info(
            'Leap seconds default URL is now: %s\nUsing new default.' % url)

    # Up to three attempts: read the local file, and on "outdated" errors
    # re-download it and try again.
    for i in range(3):
        try:
            return parse_leap_seconds_list(fn)

        except LeapSecondsOutdated:
            try:
                logger.info('updating leap seconds list...')
                download_file(url, fn)

            except Exception as e:
                raise LeapSecondsError(
                    'cannot download leap seconds list from %s to %s (%s)'
                    % (url, fn, e))

    raise LeapSecondsError('Could not retrieve/read leap seconds file.')

2497 

2498 

def gps_utc_offset(t_utc):
    '''
    Time offset t_gps - t_utc for a given t_utc.
    '''
    ls = read_leap_seconds2()

    if t_utc < ls[0][0]:
        # Before the first tabulated entry.
        return ls[0][1] - 1 - 9

    # Find the tabulated interval containing t_utc.
    for (t, noffset), (tnext, _) in zip(ls[:-1], ls[1:]):
        if t <= t_utc < tnext:
            return noffset - 9

    return ls[-1][1] - 9

2514 

2515 

def utc_gps_offset(t_gps):
    '''
    Time offset t_utc - t_gps for a given t_gps.
    '''
    ls = read_leap_seconds2()

    if t_gps < ls[0][0] + ls[0][1] - 9:
        # Before the first tabulated entry.
        return - (ls[0][1] - 1 - 9)

    # Find the tabulated interval containing t_gps (interval bounds shifted
    # into the GPS time scale).
    for (t, noffset), (tnext, noffset_next) in zip(ls[:-1], ls[1:]):
        if t + noffset - 9 <= t_gps < tnext + noffset_next - 9:
            return - (noffset - 9)

    return - (ls[-1][1] - 9)

2533 

2534 

def make_iload_family(iload_fh, doc_fmt='FMT', doc_yielded_objects='FMT'):
    '''
    Create a family of loader functions around a filehandle-based loader.

    :param iload_fh: generator function reading objects from an open
        filehandle
    :param doc_fmt: format name substituted into the generated docstrings
    :param doc_yielded_objects: description of the yielded objects,
        substituted into the generated docstrings
    :returns: tuple ``(iload_filename, iload_dirname, iload_glob, iload)``
        of generator functions wrapping ``iload_fh``
    '''
    import itertools
    import glob
    from pyrocko.io.io_common import FileLoadError

    def iload_filename(filename, **kwargs):
        try:
            with open(filename, 'rb') as f:
                for cr in iload_fh(f, **kwargs):
                    yield cr

        except FileLoadError as e:
            # Attach the offending filename before re-raising.
            e.set_context('filename', filename)
            raise

    def iload_dirname(dirname, **kwargs):
        # Non-recursive: only plain files directly inside dirname are read.
        for entry in os.listdir(dirname):
            fpath = op.join(dirname, entry)
            if op.isfile(fpath):
                for cr in iload_filename(fpath, **kwargs):
                    yield cr

    def iload_glob(pattern, **kwargs):

        for fn in glob.iglob(pattern):
            for cr in iload_filename(fn, **kwargs):
                yield cr

    def iload(source, **kwargs):
        # Dispatch on the kind of source: directory name, file name, glob
        # pattern, open filehandle, or iterable of any of these.
        if isinstance(source, str):
            if op.isdir(source):
                return iload_dirname(source, **kwargs)
            elif op.isfile(source):
                return iload_filename(source, **kwargs)
            else:
                return iload_glob(source, **kwargs)

        elif hasattr(source, 'read'):
            return iload_fh(source, **kwargs)
        else:
            return itertools.chain.from_iterable(
                iload(subsource, **kwargs) for subsource in source)

    iload_filename.__doc__ = '''
    Read %s information from named file.
    ''' % doc_fmt

    iload_dirname.__doc__ = '''
    Read %s information from directory of %s files.
    ''' % (doc_fmt, doc_fmt)

    iload_glob.__doc__ = '''
    Read %s information from files matching a glob pattern.
    ''' % doc_fmt

    iload.__doc__ = '''
    Load %s information from given source(s)

    The ``source`` can be specified as the name of a %s file, the name of a
    directory containing %s files, a glob pattern of %s files, an open
    filehandle or an iterator yielding any of the forementioned sources.

    This function behaves as a generator yielding %s objects.
    ''' % (doc_fmt, doc_fmt, doc_fmt, doc_fmt, doc_yielded_objects)

    # Make the generated functions report the caller's module on
    # introspection rather than this utility module.
    for f in iload_filename, iload_dirname, iload_glob, iload:
        f.__module__ = iload_fh.__module__

    return iload_filename, iload_dirname, iload_glob, iload

2604 

2605 

class Inconsistency(Exception):
    '''
    Raised by :py:func:`consistency_check` when tuple values differ.
    '''
    pass

2608 

2609 

def consistency_check(list_of_tuples, message='values differ:'):
    '''
    Check for inconsistencies.

    Given a list of tuples, check that all tuple elements except for first one
    match. E.g. ``[('STA.N', 55.3, 103.2), ('STA.E', 55.3, 103.2)]`` would be
    valid because the coordinates at the two channels are the same.

    :raises Inconsistency: if any of the value tuples differ
    '''

    if len(list_of_tuples) < 2:
        return

    reference = list_of_tuples[0][1:]
    if all(t[1:] == reference for t in list_of_tuples[1:]):
        return

    raise Inconsistency('%s\n' % message + '\n'.join(
        '  %s: %s' % (t[0], ', '.join('%g' % x for x in t[1:]))
        for t in list_of_tuples))

2624 

2625 

class defaultzerodict(dict):
    '''
    Dict subclass returning 0 for missing keys (without inserting them).
    '''

    def __missing__(self, key):
        return 0

2629 

2630 

def mostfrequent(x):
    '''
    Return the most frequent element of sequence ``x``.

    Among equally frequent elements, the one first encountered last (in
    first-seen order) wins, because the stable sort keeps insertion order.
    '''
    counts = {}
    for element in x:
        counts[element] = counts.get(element, 0) + 1

    return sorted(counts.keys(), key=lambda k: counts[k])[-1]

2637 

2638 

def consistency_merge(list_of_tuples,
                      message='values differ:',
                      error='raise',
                      merge=mostfrequent):
    '''
    Merge tuples of ``(label, value, ...)`` entries, checking consistency.

    :param list_of_tuples: list of tuples to merge
    :param message: message prefix used when reporting inconsistencies
    :param error: one of ``'raise'``, ``'warn'``, ``'ignore'``
    :param merge: function applied column-wise to differing values
    :returns: tuple of merged values (without the leading labels)
    :raises Inconsistency: if values differ and ``error='raise'``
    '''

    assert error in ('raise', 'warn', 'ignore')

    if len(list_of_tuples) == 0:
        raise Exception('cannot merge empty sequence')

    try:
        consistency_check(list_of_tuples, message)
        return list_of_tuples[0][1:]
    except Inconsistency as e:
        if error == 'raise':
            raise

        elif error == 'warn':
            logger.warning(str(e))

        # Merge each value column individually.
        columns = list(zip(*list_of_tuples))[1:]
        return tuple(merge(column) for column in columns)

2660 

2661 

def short_to_list(nmax, it):
    '''
    Return iterable ``it`` as a list if it holds at most ``nmax`` elements,
    otherwise as an iterator over all its elements.
    '''
    import itertools

    if isinstance(it, list):
        return it

    head = []
    for _ in range(nmax+1):
        try:
            head.append(next(it))
        except StopIteration:
            # Iterator exhausted within the limit - a plain list suffices.
            return head

    # Too long: chain the consumed prefix back onto the rest.
    return itertools.chain(head, it)

2676 

2677 

def parse_md(f):
    '''
    Load the ``README.md`` next to file ``f`` and convert it to reST.
    '''
    try:
        readme_path = op.join(op.dirname(op.abspath(f)), 'README.md')
        with open(readme_path, 'r') as readme:
            mdstr = readme.read()
    except IOError as e:
        return 'Failed to get README.md: %s' % e

    # Remove the title
    mdstr = re.sub(r'^# .*\n?', '', mdstr)
    # Append sphinx reference to `pyrocko.` modules
    mdstr = re.sub(r'`pyrocko\.(.*)`', r':py:mod:`pyrocko.\1`', mdstr)
    # Convert Subsections to toc-less rubrics
    mdstr = re.sub(r'## (.*)\n', r'.. rubric:: \1\n', mdstr)
    return mdstr

2694 

2695 

def mpl_show(plt):
    '''
    Show matplotlib plot, warning instead of failing silently when the
    non-interactive 'agg' backend is active.
    '''
    import matplotlib

    if matplotlib.get_backend().lower() != 'agg':
        plt.show()
    else:
        logger.warning('Cannot show() when using matplotlib "agg" backend')

2702 

2703 

# Tokenizer for whitespace-separated fields: matches a double-quoted string
# (group 1), a single-quoted string (group 2) or a bare word (group 3),
# allowing backslash-escapes inside the quoted forms.
g_re_qsplit = re.compile(
    r'"([^"\\]*(?:\\.[^"\\]*)*)"|\'([^\'\\]*(?:\\.[^\'\\]*)*)\'|(\S+)')
# Cache of equivalent tokenizer regexes for custom separators, by separator.
g_re_qsplit_sep = {}

2707 

2708 

def get_re_qsplit(sep):
    '''
    Get (cached) tokenizer regex for :py:func:`qsplit` with separator ``sep``.

    :param sep: single-character separator or ``None`` for whitespace
        splitting
    '''
    if sep is None:
        return g_re_qsplit

    if sep not in g_re_qsplit_sep:
        assert len(sep) == 1
        assert sep not in '\'"'
        esep = re.escape(sep)
        # Double-quoted string | single-quoted string | bare field, where a
        # bare field may also be empty (between, before or after separators).
        pattern = (
            r'"([^"\\]*(?:\\.[^"\\]*)*)"|\'([^\'\\]*(?:\\.[^\'\\]*)*)\'|'
            r'([^' + esep + r']+'
            r'|(?<=' + esep + r')(?=' + esep + r')'
            r'|^(?=' + esep + r')'
            r'|(?<=' + esep + r')$)')
        g_re_qsplit_sep[sep] = re.compile(pattern)

    return g_re_qsplit_sep[sep]

2721 

2722 

# Matches strings which need no quoting: no quotes, no whitespace.
g_re_trivial = re.compile(r'\A[^\'"\s]+\Z')
# Cache of per-separator variants, by separator character.
g_re_trivial_sep = {}


def get_re_trivial(sep):
    '''
    Get (cached) regex matching strings needing no quoting with ``sep``.

    :param sep: single-character separator or ``None`` for whitespace
        separation
    :returns: compiled regex matching strings free of quotes and of
        whitespace (for ``sep=None``) or of the separator character
    '''
    if sep is None:
        return g_re_trivial
    else:
        # Bug fix: the cache-presence check previously queried
        # g_re_qsplit_sep, so a prior call to get_re_qsplit() with the same
        # separator led to a KeyError below.
        if sep not in g_re_trivial_sep:
            assert len(sep) == 1
            assert sep not in '\'"'
            esep = re.escape(sep)
            g_re_trivial_sep[sep] = re.compile(r'\A[^\'"' + esep + r']+\Z')

        return g_re_trivial_sep[sep]

2738 

2739 

# Precompiled patterns for backslash-escaping/unescaping: the *_s variants
# handle single quotes, the *_d variants double quotes; backslashes
# themselves are escaped/unescaped in both.
g_re_escape_s = re.compile(r'([\\\'])')
g_re_unescape_s = re.compile(r'\\([\\\'])')
g_re_escape_d = re.compile(r'([\\"])')
g_re_unescape_d = re.compile(r'\\([\\"])')

2744 

2745 

def escape_s(s):
    '''
    Backslash-escape single-quotes and backslashes.

    Example: ``Jack's`` => ``Jack\\'s``
    '''
    return re.sub(r"([\\'])", r'\\\1', s)

2754 

2755 

def unescape_s(s):
    '''
    Unescape backslash-escaped single-quotes and backslashes.

    Example: ``Jack\\'s`` => ``Jack's``
    '''
    return re.sub(r"\\([\\'])", r'\1', s)

2763 

2764 

def escape_d(s):
    '''
    Backslash-escape double-quotes and backslashes.

    Example: ``"Hello \\O/"`` => ``\\"Hello \\\\O/\\"``
    '''
    return re.sub(r'([\\"])', r'\\\1', s)

2772 

2773 

def unescape_d(s):
    '''
    Unescape backslash-escaped double-quotes and backslashes.

    Example: ``\\"Hello \\\\O/\\"`` => ``"Hello \\O/"``
    '''
    return re.sub(r'\\([\\"])', r'\1', s)

2781 

2782 

def qjoin_s(it, sep=None):
    '''
    Join sequence of strings into a line, single-quoting non-trivial strings.

    Example: ``["55", "Sparrow's Island"]`` => ``"55 'Sparrow\\\\'s Island'"``
    '''
    re_trivial = get_re_trivial(sep)
    joiner = ' ' if sep is None else sep

    return joiner.join(
        w if re_trivial.search(w) else "'%s'" % escape_s(w)
        for w in it)

2796 

2797 

def qjoin_d(it, sep=None):
    '''
    Join sequence of strings into a line, double-quoting non-trivial strings.

    Example: ``['55', 'Pete "The Robot" Smith']`` =>
    ``'55' "Pete \\\\"The Robot\\\\" Smith"'``
    '''
    re_trivial = get_re_trivial(sep)
    joiner = ' ' if sep is None else sep

    return joiner.join(
        w if re_trivial.search(w) else '"%s"' % escape_d(w)
        for w in it)

2811 

2812 

def qsplit(s, sep=None):
    '''
    Split line into list of strings, allowing for quoted strings.

    Inverse of :py:func:`qjoin_s` / :py:func:`qjoin_d`: double- and
    single-quoted substrings are unescaped and taken as single tokens.
    '''
    matches = get_re_qsplit(sep).findall(s)
    return [
        unescape_d(in_dq) or unescape_s(in_sq) or bare
        for in_dq, in_sq, bare in matches]

2826 

2827 

# Ensures the missing-threadpoolctl warning is emitted only once per process.
g_have_warned_threadpoolctl = False

2829 

2830 

class threadpool_limits_dummy(object):
    '''
    No-op drop-in replacement for ``threadpoolctl.threadpool_limits``.

    Supports the context manager protocol but does not limit any thread
    pools; emits a one-time warning instead.
    '''

    def __init__(self, *args, **kwargs):
        pass

    def __enter__(self):
        global g_have_warned_threadpoolctl

        if not g_have_warned_threadpoolctl:
            logger.warning(
                'Cannot control number of BLAS threads because '
                '`threadpoolctl` module is not available. You may want to '
                'install `threadpoolctl`.')

            g_have_warned_threadpoolctl = True

        return self

    def __exit__(self, exc_type, exc_value, exc_traceback):
        pass

2851 

2852 

def get_threadpool_limits():
    '''
    Try to import threadpoolctl.threadpool_limits, provide dummy if not avail.
    '''

    try:
        from threadpoolctl import threadpool_limits
    except ImportError:
        return threadpool_limits_dummy

    return threadpool_limits