1# http://pyrocko.org - GPLv3
2#
3# The Pyrocko Developers, 21st Century
4# ---|P------/S----------~Lg----------
import re
import struct
import sys
from collections import namedtuple
from io import BytesIO, StringIO

import numpy as num

from pyrocko import util, trace
from .io_common import FileLoadError
g_guralp_zero = None


def get_guralp_zero():
    '''
    Return the GCF time epoch (1989-11-17 00:00:00) as a system time.

    The value is computed once and cached in the module-level
    ``g_guralp_zero`` variable.
    '''
    global g_guralp_zero
    if g_guralp_zero is not None:
        return g_guralp_zero

    g_guralp_zero = util.str_to_time('1989-11-17 00:00:00')
    return g_guralp_zero
class GCFLoadError(FileLoadError):
    '''Raised when a GCF file cannot be parsed as expected.'''
class EOF(Exception):
    '''Signals a clean end of file while scanning GCF blocks.'''
# Decoded 16-byte GCF block header.
Header = namedtuple(
    'Header',
    [
        'block_type',
        'system_id',
        'stream_id',
        'instrument_type',
        'time',
        'gain',
        'ttl',
        'sample_rate',
        'compression',
        'nrecords',
    ])
def read(f, n, eof_ok=False):
    '''
    Read exactly ``n`` bytes from file object ``f``.

    :param eof_ok: if ``True``, a read returning zero bytes raises
        :py:exc:`EOF` (clean end of file) instead of an error
    :raises GCFLoadError: if fewer than ``n`` bytes are available
    '''
    chunk = f.read(n)
    if eof_ok and not chunk:
        raise EOF()

    if len(chunk) != n:
        raise GCFLoadError('Unexpected end of file')

    return chunk
def read_header(f, endianness='>'):
    '''
    Read and decode one 16-byte GCF block header from ``f``.

    :param f: binary file object positioned at the start of a block
    :param endianness: struct byte-order character, ``'>'`` or ``'<'``
    :returns: :py:class:`Header`
    :raises EOF: at a clean end of file
    :raises GCFLoadError: on malformed or unsupported headers
    '''
    e = endianness
    data = read(f, 16, eof_ok=True)

    isystem_id, istream_id = struct.unpack(e+'II', data[:8])

    # If the top bit is set, the system ID word also encodes instrument
    # type and gain in its high bits; the ID itself uses fewer bits.
    ex = isystem_id & 0x80000000
    if ex:
        ex2 = isystem_id & (1 << 30)
        if ex2:
            system_id = util.base36encode(isystem_id & (2**21 - 1))
        else:
            system_id = util.base36encode(isystem_id & (2**26 - 1))

        instrument_type = (isystem_id >> 26) & 0b1
        gain = [None, 1, 2, 4, 8, 16, 32, 64][(isystem_id >> 27) & 0b111]
    else:
        system_id = util.base36encode(isystem_id)
        instrument_type = None
        gain = None

    stream_id = util.base36encode(istream_id)

    # Time is packed as days since the Guralp epoch (high 15 bits) and
    # seconds within the day (low 17 bits).
    i_day_second = struct.unpack(e+'I', data[8:12])[0]
    iday = i_day_second >> 17
    isecond = i_day_second & 0x1ffff
    time = (iday*24*60*60) + get_guralp_zero() + isecond

    ittl, israte, compression, nrecords = struct.unpack(e+'BBBB', data[12:])
    if nrecords > 250:
        raise GCFLoadError('Header indicates too many records in block.')

    if israte == 0:
        # A sample rate of zero marks a non-data block; the specific type
        # follows from the stream ID suffix and the compression code.
        if compression == 4 and stream_id[-2:] == '00':
            block_type = 'status_block'

        elif compression == 4 and stream_id[-2:] == '01':
            block_type = 'unified_status_block'

        elif compression == 4 and stream_id[-2:] == 'SM':
            block_type = 'strong_motion_block'

        elif stream_id[-2:] == 'CD':
            block_type = 'cd_status_block'

        elif compression == 4 and stream_id[-2:] == 'BP':
            block_type = 'byte_pipe_block'

        elif stream_id[-2:] == 'IB':
            block_type = 'information_block'

        else:
            raise GCFLoadError('Unexpected block type found.')

        return Header(block_type, system_id, stream_id, instrument_type,
                      time, gain, ittl, 0.0, compression, nrecords)
    else:
        block_type = 'data_block'

        if not re.match(r'^([ZNEXC][0-9A-CG-S]|M[0-9A-F])$', stream_id[-2:]):
            raise GCFLoadError('Unexpected data stream ID')

        # Special rate codes: code -> (true rate [Hz], fractional start
        # time offset denominator for rates above 250 Hz).
        sample_rate_tab = {
            157: (0.1, None),
            161: (0.125, None),
            162: (0.2, None),
            164: (0.25, None),
            167: (0.5, None),
            171: (400., 8),
            174: (500., 2),
            176: (1000., 4),
            179: (2000., 8),
            181: (4000., 16)}

        if israte in sample_rate_tab:
            sample_rate, tfod = sample_rate_tab[israte]
        else:
            sample_rate = float(israte)
            tfod = None

        if tfod is not None:
            # For high sample rates the upper nibble of the compression
            # byte is a fractional start time offset: numerator over tfod,
            # in seconds. True division is required here; floor division
            # would truncate the sub-second offset to zero in nearly all
            # cases because the numerator is usually smaller than tfod.
            toff = (compression >> 4) / tfod
            compression = compression & 0b1111
            time += toff

        if compression not in (1, 2, 4):
            raise GCFLoadError(
                'Unsupported compression code: %i' % compression)

        return Header(block_type, system_id, stream_id, instrument_type,
                      time, gain, ittl, sample_rate, compression, nrecords)
def read_data(f, h, endianness='>'):
    '''
    Read and decode the 1008-byte payload of a GCF data block.

    The payload holds a first sample value (forward integration constant),
    ``h.nrecords`` 32-bit records of first differences (1, 2 or 4 samples
    per record depending on ``h.compression``) and a trailing check value
    (reverse integration constant) used as a checksum.

    :param f: binary file object positioned just after the block header
    :param h: :py:class:`Header` of the block
    :param endianness: struct byte-order character, ``'>'`` or ``'<'``
    :returns: decoded samples as :py:class:`numpy.ndarray` of int32
    :raises GCFLoadError: on unsupported compression or checksum mismatch
    '''
    e = endianness
    data = read(f, 1024 - 16)
    first = struct.unpack(e+'i', data[0:4])[0]
    # Compression code selects the per-sample difference width:
    # 1 -> 32 bit, 2 -> 16 bit, 4 -> 8 bit.
    dtype = {1: e+'i4', 2: e+'i2', 4: e+'i1'}
    if h.compression not in dtype:
        raise GCFLoadError('Unsupported compression code: %i' % h.compression)

    nsamples = h.compression * h.nrecords
    difs = num.frombuffer(data[4:4+h.nrecords*4], dtype=dtype[h.compression],
                          count=nsamples)
    # Integrate the differences; the first difference is taken relative to
    # the forward integration constant.
    samples = difs.astype(num.int32)
    samples[0] += first
    samples = num.cumsum(samples, dtype=num.int32)
    last = struct.unpack(e+'i', data[4+h.nrecords*4:4+h.nrecords*4+4])[0]
    if last != samples[-1]:
        raise GCFLoadError('Checksum error occurred')

    return samples
def read_status(f, h):
    '''
    Read the payload of a status block and return its used portion.

    The block payload is always 1008 bytes; only the first
    ``h.nrecords * 4`` bytes carry status data.
    '''
    nbytes_used = h.nrecords * 4
    payload = read(f, 1024 - 16)
    return payload[:nbytes_used]
def iload(filename, load_data=True):
    '''
    Read a GCF file and yield its contents as trace objects.

    Contiguous data blocks belonging to the same (system, stream) pair are
    merged into a single trace. A trace is yielded as soon as a time gap
    is detected; any traces still open at the end of the file are yielded
    last.

    :param filename: path of the GCF file to read
    :param load_data: if ``False``, only headers are read; yielded traces
        have ``ydata=None`` and their time spans are computed from the
        headers alone
    '''
    traces = {}

    with open(filename, 'rb') as f:
        try:
            while True:
                h = read_header(f)
                if h.block_type == 'data_block':
                    deltat = 1.0 / h.sample_rate
                    if load_data:
                        samples = read_data(f, h)
                        tmax = None
                    else:
                        # Skip the 1008-byte payload and compute the
                        # block's end time from the header instead.
                        f.seek(1024 - 16, 1)
                        samples = None
                        tmax = h.time + (
                            h.nrecords * h.compression - 1) * deltat

                    nslc = ('', h.system_id, '', h.stream_id)

                    if nslc in traces:
                        tr = traces[nslc]
                        # The block continues the open trace when it
                        # starts exactly one sample interval after the
                        # trace's end (within a small tolerance).
                        if abs((tr.tmax + tr.deltat) - h.time) < deltat*0.0001:
                            if samples is not None:
                                tr.append(samples)
                            else:
                                tr.tmax = tmax

                        else:
                            # Gap or overlap: finish and emit the trace.
                            del traces[nslc]
                            yield tr

                    if nslc not in traces:
                        traces[nslc] = trace.Trace(
                            *nslc,
                            tmin=h.time,
                            deltat=deltat,
                            ydata=samples,
                            tmax=tmax)

                else:
                    # Non-data block: skip its payload.
                    f.seek(1024 - 16, 1)

        except EOF:
            # Clean end of file: emit all still-open traces.
            for tr in traces.values():
                yield tr
def detect(first512):
    '''
    Check whether the given initial bytes look like the start of a GCF file.

    :param first512: first 512 bytes of the candidate file, as ``bytes``
    :returns: ``True`` if the content parses as a GCF block header

    Note: does not work properly, produces too many false positives;
    difficult to improve due to the very compact GCF header.
    '''
    try:
        if len(first512) < 512:
            return False

        # Headers are parsed from a binary stream, so the raw bytes must
        # be wrapped in a BytesIO. Using StringIO here raised a TypeError
        # on bytes input, which was swallowed by the except clause and
        # made detection always fail.
        f = BytesIO(first512)
        read_header(f)
        return True

    except Exception:
        return False
if __name__ == '__main__':
    # Simple CLI: detect GCF files among the given paths, load them and
    # open the result in the Snuffler viewer.
    util.setup_logging('warn')

    all_traces = []
    for fn in sys.argv[1:]:
        # Read only the detection prefix and close the file promptly
        # instead of leaking the handle.
        with open(fn, 'rb') as f:
            head = f.read(512)

        if detect(head):
            print(fn)
            all_traces.extend(iload(fn))

    trace.snuffle(all_traces)