import logging
import os.path as op
import struct
import datetime
import mmap
import numpy as num

from pyrocko import trace

logger = logging.getLogger(__name__)


def write_property_dict(prop_dict, out_file):
    from pprint import pformat

    f = open(out_file, "w")
    f.write("tdms_property_map=")
    f.write(pformat(prop_dict))
    f.close()


def type_not_supported(vargin):
    """Raise NotImplementedError for unsupported tdsDataTypes."""
    raise NotImplementedError("Reading of this tdsDataType is not implemented")


def parse_time_stamp(fractions, seconds):
    """
    Convert a TDMS timestamp to a datetime.

    fractions -- fractional seconds (in units of 2^-64 s)
    seconds -- number of seconds since 1904-01-01
    @rtype : datetime.datetime
    """
    if (
        fractions is not None
        and seconds is not None
        and fractions + seconds > 0
    ):
        return datetime.timedelta(
            0, fractions * 2 ** -64 + seconds
        ) + datetime.datetime(1904, 1, 1)
    else:
        return None
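
# Example (for illustration only): a timestamp 3.5 s past the 1904-01-01
# epoch is encoded as seconds=3 and fractions=0.5 * 2**64, so
#
#   parse_time_stamp(int(0.5 * 2**64), 3)
#
# returns datetime.datetime(1904, 1, 1, 0, 0, 3, 500000).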


# Enum mapping TDMS data types to description string, numpy type where exists
# See Ref[2] for enum values
TDS_DATA_TYPE = dict(
    {
        0x00: "void",  # tdsTypeVoid
        0x01: "int8",  # tdsTypeI8
        0x02: "int16",  # tdsTypeI16
        0x03: "int32",  # tdsTypeI32
        0x04: "int64",  # tdsTypeI64
        0x05: "uint8",  # tdsTypeU8
        0x06: "uint16",  # tdsTypeU16
        0x07: "uint32",  # tdsTypeU32
        0x08: "uint64",  # tdsTypeU64
        0x09: "float32",  # tdsTypeSingleFloat
        0x0A: "float64",  # tdsTypeDoubleFloat
        0x0B: "float128",  # tdsTypeExtendedFloat
        0x19: "singleFloatWithUnit",  # tdsTypeSingleFloatWithUnit
        0x1A: "doubleFloatWithUnit",  # tdsTypeDoubleFloatWithUnit
        0x1B: "extendedFloatWithUnit",  # tdsTypeExtendedFloatWithUnit
        0x20: "str",  # tdsTypeString
        0x21: "bool",  # tdsTypeBoolean
        0x44: "datetime",  # tdsTypeTimeStamp
        0xFFFFFFFF: "raw",  # tdsTypeDAQmxRawData
    }
)

# Function mapping for reading TDMS data types
TDS_READ_VAL = dict(
    {
        "void": lambda f: None,  # tdsTypeVoid
        "int8": lambda f: struct.unpack("<b", f.read(1))[0],
        "int16": lambda f: struct.unpack("<h", f.read(2))[0],
        "int32": lambda f: struct.unpack("<i", f.read(4))[0],
        "int64": lambda f: struct.unpack("<q", f.read(8))[0],
        "uint8": lambda f: struct.unpack("<B", f.read(1))[0],
        "uint16": lambda f: struct.unpack("<H", f.read(2))[0],
        "uint32": lambda f: struct.unpack("<I", f.read(4))[0],
        "uint64": lambda f: struct.unpack("<Q", f.read(8))[0],
        "float32": lambda f: struct.unpack("<f", f.read(4))[0],
        "float64": lambda f: struct.unpack("<d", f.read(8))[0],
        "float128": type_not_supported,
        "singleFloatWithUnit": type_not_supported,
        "doubleFloatWithUnit": type_not_supported,
        "extendedFloatWithUnit": type_not_supported,
        "str": lambda f: f.read(struct.unpack("<i", f.read(4))[0]),
        "bool": lambda f: struct.unpack("<?", f.read(1))[0],
        "datetime": lambda f: parse_time_stamp(
            struct.unpack("<Q", f.read(8))[0],
            struct.unpack("<q", f.read(8))[0],
        ),
        "raw": type_not_supported,
    }
)
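
# For illustration, the lookup table can be exercised on an in-memory buffer
# (io.BytesIO stands in for the open TDMS file object):
#
#   import io
#   TDS_READ_VAL["int32"](io.BytesIO(struct.pack("<i", 42)))  # -> 42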


DECIMATE_MASK = 0b00100000
LEAD_IN_LENGTH = 28
FILEINFO_NAMES = (
    "file_tag",
    "toc",
    "version",
    "next_segment_offset",
    "raw_data_offset",
)


class TdmsReader(object):
    """A TDMS file reader object for reading properties and data"""

    def __init__(self, filename):
        self._properties = None
        self._end_of_properties_offset = None
        self._data_type = None
        self._chunk_size = None

        self._raw_data = None
        self._raw_data2 = None  # The mapped data in the 'Next Segment'
        self._raw_last_chunk = None
        self._raw2_last_chunk = None

        self.file_size = op.getsize(filename)
        self._channel_length = None
        self._seg1_length = None
        self._seg2_length = None

        # TODO: Error if file not big enough to hold header
        self._tdms_file = open(filename, "rb")
        # Read lead in (28 bytes):
        lead_in = self._tdms_file.read(LEAD_IN_LENGTH)
        # lead_in is 28 bytes:
        # [string of length 4][int32][int32][int64][int64]
        fields = struct.unpack("<4siiQQ", lead_in)

        if fields[0].decode() != "TDSm":
            msg = "Not a TDMS file (TDSm tag not found)"
            raise TypeError(msg)

        self.fileinfo = dict(zip(FILEINFO_NAMES, fields))
        self.fileinfo["decimated"] = not bool(
            self.fileinfo["toc"] & DECIMATE_MASK
        )
        # Make offsets relative to beginning of file:
        self.fileinfo["next_segment_offset"] += LEAD_IN_LENGTH
        self.fileinfo["raw_data_offset"] += LEAD_IN_LENGTH
        self.fileinfo["file_size"] = op.getsize(self._tdms_file.name)

        # TODO: Validate lead in:
        if self.fileinfo["next_segment_offset"] > self.file_size:
            self.fileinfo["next_segment_offset"] = self.file_size
            # raise(ValueError, "Next Segment Offset too large in TDMS header")

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self._tdms_file.close()

    @property
    def channel_length(self):
        if self._properties is None:
            self.get_properties()

        rdo = int(self.fileinfo["raw_data_offset"])
        nch = int(self.n_channels)
        nso = self.fileinfo["next_segment_offset"]
        return int((nso - rdo) / nch / num.dtype(self._data_type).itemsize)

    @property
    def n_channels(self):
        if self._properties is None:
            self.get_properties()
        return self.fileinfo['n_channels']

    def get_properties(self, mapped=False):
        """
        Return a dictionary of properties. Read from file only if necessary.
        """
        # Check if we already hold the properties in memory
        if self._properties is None:
            self._properties = self._read_properties()
        return self._properties
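
    # Minimal usage sketch (illustrative; 'recording.tdms' is a placeholder
    # filename, the property name is one iDAS recordings typically carry):
    #
    #   with TdmsReader('recording.tdms') as tdms:
    #       props = tdms.get_properties()
    #       print(props.get('SamplingFrequency[Hz]'))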

    def _read_property(self):
        """
        Read a single property from the TDMS file.
        Return the name, type and value of the property as a list.
        """
        # Read length of object path:
        var = struct.unpack("<i", self._tdms_file.read(4))[0]
        # Read property name and type:
        name, data_type = struct.unpack(
            "<{0}si".format(var), self._tdms_file.read(var + 4)
        )
        # Lookup function to read and parse property value based on type:
        value = TDS_READ_VAL[TDS_DATA_TYPE[data_type]](self._tdms_file)
        name = name.decode()
        if data_type == 32:
            value = value.decode()

        return name, data_type, value

    def _read_properties(self):
        """Read the properties from the file"""
        self._tdms_file.seek(LEAD_IN_LENGTH, 0)
        # Number of channels is total objects - file objects - group objects
        self.fileinfo["n_channels"] = (
            struct.unpack("<i", self._tdms_file.read(4))[0] - 2
        )
        # Read length of object path:
        var = struct.unpack("<i", self._tdms_file.read(4))[0]
        # Skip over object path and raw data index:
        self._tdms_file.seek(var + 4, 1)
        # Read number of properties in this group:
        var = struct.unpack("<i", self._tdms_file.read(4))[0]

        # Loop through and read each property
        properties = [self._read_property() for _ in range(var)]
        properties = {prop: value for (prop, type, value) in properties}

        self._end_of_properties_offset = self._tdms_file.tell()

        self._read_chunk_size()
        # TODO: Add number of channels to properties
        return properties

    def _read_chunk_size(self):
        """Read the data chunk size from the TDMS file header."""
        if self._end_of_properties_offset is None:
            self._read_properties()

        self._tdms_file.seek(self._end_of_properties_offset, 0)

        # Skip over group information:
        var = struct.unpack("<i", self._tdms_file.read(4))[0]
        self._tdms_file.seek(var + 8, 1)

        # Skip over first channel path and length of index information:
        var = struct.unpack("<i", self._tdms_file.read(4))[0]
        self._tdms_file.seek(var + 4, 1)

        self._data_type = TDS_DATA_TYPE.get(
            struct.unpack("<i", self._tdms_file.read(4))[0]
        )
        if self._data_type not in ("int16", "float32"):
            raise Exception("Unsupported TDMS data type: %s" % self._data_type)

        # Read dimension of the raw data array (has to be 1):
        # dummy = struct.unpack("<i", self._tdms_file.read(4))[0]

        self._chunk_size = struct.unpack("<i", self._tdms_file.read(4))[0]

    def get_data(self, first_ch=0, last_ch=None, first_s=0, last_s=None):
        """
        Get a block of data from the TDMS file.

        first_ch -- The first channel to load
        last_ch -- The last channel to load
        first_s -- The first sample to load
        last_s -- The last sample to load
        """
        if self._raw_data is None:
            self._initialise_data()
        if first_ch is None or first_ch < 0:
            first_ch = 0
        if last_ch is None or last_ch >= self.n_channels:
            last_ch = self.n_channels
        else:
            last_ch += 1
        if last_s is None or last_s > self._channel_length:
            last_s = self._channel_length
        else:
            last_s += 1
        nch = int(max(last_ch - first_ch, 0))
        ns = int(max(last_s - first_s, 0))

        # Allocate output container
        data = num.empty((ns, nch), dtype=num.dtype(self._data_type))
        if data.size == 0:
            return data

        # 1. Index first block & reshape
        first_blk = first_s // self._chunk_size
        last_blk = last_s // self._chunk_size
        last_full_blk = min(last_blk + 1, self._raw_data.shape[1])
        nchunk = min(
            max(last_full_blk - first_blk, 0), self._raw_data.shape[1]
        )
        first_s_1a = max(first_s - first_blk * self._chunk_size, 0)
        last_s_1a = min(
            last_s - first_blk * self._chunk_size, nchunk * self._chunk_size
        )
        ind_s = 0
        ind_e = ind_s + max(last_s_1a - first_s_1a, 0)

        d = self._raw_data[:, first_blk:last_full_blk, first_ch:last_ch]
        d = d.reshape((self._chunk_size * nchunk, nch), order="F")
        data[ind_s:ind_e, :] = d[first_s_1a:last_s_1a, :]

        # 2. Index first additional samples
        first_s_1b = max(
            first_s - self._raw_data.shape[1] * self._chunk_size, 0
        )
        last_s_1b = min(
            last_s - self._raw_data.shape[1] * self._chunk_size,
            self._raw_last_chunk.shape[0],
        )
        ind_s = ind_e
        ind_e = ind_s + max(last_s_1b - first_s_1b, 0)
        # data_1b = self._raw_last_chunk[first_s_1b:last_s_1b, first_ch:last_ch]
        if ind_e > ind_s:
            data[ind_s:ind_e, :] = self._raw_last_chunk[
                first_s_1b:last_s_1b, first_ch:last_ch
            ]

        # 3. Index second block
        first_s_2 = max(first_s - self._seg1_length, 0)
        last_s_2 = last_s - self._seg1_length
        if (first_s_2 > 0 or last_s_2 > 0) and self._raw_data2 is not None:
            first_blk_2 = max(first_s_2 // self._chunk_size, 0)
            last_blk_2 = max(last_s_2 // self._chunk_size, 0)
            last_full_blk_2 = min(last_blk_2 + 1, self._raw_data2.shape[1])
            nchunk_2 = min(
                max(last_full_blk_2 - first_blk_2, 0), self._raw_data2.shape[1]
            )
            first_s_2a = max(first_s_2 - first_blk_2 * self._chunk_size, 0)
            last_s_2a = min(
                last_s_2 - first_blk_2 * self._chunk_size,
                nchunk_2 * self._chunk_size,
            )
            ind_s = ind_e
            ind_e = ind_s + max(last_s_2a - first_s_2a, 0)
            # data_2a = self._raw_data2[:, first_blk_2:last_full_blk_2,
            #                           first_ch:last_ch]\
            #     .reshape((self._chunk_size*nchunk_2, nch), order='F')\
            #     [first_s_2a:last_s_2a, :]
            if ind_e > ind_s:
                data[ind_s:ind_e, :] = self._raw_data2[
                    :, first_blk_2:last_full_blk_2, first_ch:last_ch
                ].reshape((self._chunk_size * nchunk_2, nch), order="F")[
                    first_s_2a:last_s_2a, :
                ]

        # 4. Index second additional samples
        if (
            first_s_2 > 0 or last_s_2 > 0
        ) and self._raw2_last_chunk is not None:
            first_s_2b = max(
                first_s_2 - self._raw_data2.shape[1] * self._chunk_size, 0
            )
            last_s_2b = min(
                last_s_2 - self._raw_data2.shape[1] * self._chunk_size,
                self._raw2_last_chunk.shape[0],
            )
            ind_s = ind_e
            ind_e = ind_s + max(last_s_2b - first_s_2b, 0)
            # data_2b = \
            #     self._raw2_last_chunk[first_s_2b:last_s_2b, first_ch:last_ch]
            if ind_e > ind_s:
                data[ind_s:ind_e, :] = self._raw2_last_chunk[
                    first_s_2b:last_s_2b, first_ch:last_ch
                ]

        # 5. Concatenate blocks
        # data = num.concatenate((data_1a, data_1b, data_2a, data_2b))
        if data.size == 0:
            data = data.reshape(0, 0)
        return data
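
    # Illustrative slice (placeholder filename; assumes the recording holds at
    # least 10 channels and 1000 samples). Channel and sample bounds are
    # inclusive, and the result is shaped (nsamples, nchannels), here
    # (1000, 10):
    #
    #   with TdmsReader('recording.tdms') as tdms:
    #       block = tdms.get_data(first_ch=0, last_ch=9, first_s=0, last_s=999)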

    def _initialise_data(self):
        """Initialise the memory map for the data array."""
        if self._chunk_size is None:
            self._read_chunk_size()

        dmap = mmap.mmap(self._tdms_file.fileno(), 0, access=mmap.ACCESS_READ)
        rdo = int(self.fileinfo["raw_data_offset"])
        nch = int(self.n_channels)

        # TODO: Support streaming file type?
        # TODO: Is this a valid calculation for ChannelLength?
        nso = self.fileinfo["next_segment_offset"]
        self._seg1_length = int(
            (nso - rdo) / nch / num.dtype(self._data_type).itemsize
        )
        self._channel_length = self._seg1_length

        if self.fileinfo["decimated"]:
            n_complete_blk = int(self._seg1_length / self._chunk_size)
            ax_ord = "C"
        else:
            n_complete_blk = 0
            ax_ord = "F"
        self._raw_data = num.ndarray(
            (n_complete_blk, nch, self._chunk_size),
            dtype=self._data_type,
            buffer=dmap,
            offset=rdo,
        )
        # Rotate the axes to [chunk_size, nblk, nch]
        self._raw_data = num.rollaxis(self._raw_data, 2)
        additional_samples = int(
            self._seg1_length - n_complete_blk * self._chunk_size
        )
        additional_samples_offset = (
            rdo
            + n_complete_blk
            * nch
            * self._chunk_size
            * num.dtype(self._data_type).itemsize
        )
        self._raw_last_chunk = num.ndarray(
            (nch, additional_samples),
            dtype=self._data_type,
            buffer=dmap,
            offset=additional_samples_offset,
            order=ax_ord,
        )
        # Rotate the axes to [samples, nch]
        self._raw_last_chunk = num.rollaxis(self._raw_last_chunk, 1)

        if self.file_size == nso:
            self._seg2_length = 0
        else:
            self._tdms_file.seek(nso + 12, 0)
            (seg2_nso, seg2_rdo) = struct.unpack(
                "<qq", self._tdms_file.read(2 * 8)
            )
            self._seg2_length = int(
                (seg2_nso - seg2_rdo)
                / nch
                / num.dtype(self._data_type).itemsize
            )
            if self.fileinfo["decimated"]:
                n_complete_blk2 = int(self._seg2_length / self._chunk_size)
            else:
                n_complete_blk2 = 0
            self._raw_data2 = num.ndarray(
                (n_complete_blk2, nch, self._chunk_size),
                dtype=self._data_type,
                buffer=dmap,
                offset=(nso + LEAD_IN_LENGTH + seg2_rdo),
            )
            self._raw_data2 = num.rollaxis(self._raw_data2, 2)
            additional_samples = int(
                self._seg2_length - n_complete_blk2 * self._chunk_size
            )
            additional_samples_offset = (
                nso
                + LEAD_IN_LENGTH
                + seg2_rdo
                + n_complete_blk2
                * nch
                * self._chunk_size
                * num.dtype(self._data_type).itemsize
            )
            self._raw2_last_chunk = num.ndarray(
                (nch, additional_samples),
                dtype=self._data_type,
                buffer=dmap,
                offset=additional_samples_offset,
                order=ax_ord,
            )
            # Rotate the axes to [samples, nch]
            self._raw2_last_chunk = num.rollaxis(self._raw2_last_chunk, 1)

        if self._raw_data2 is not None and (
            self._raw_data2.size != 0 or self._raw2_last_chunk.size != 0
        ):
            pass
            # raise Exception('Second segment contains some data, '
            #                 'not currently supported')
        self._channel_length = self._seg1_length + self._seg2_length
        # else:
        #     print "Not decimated"
        #     raise Exception('Reading file with decimated flag not set is not'
        #                     ' supported yet')


META_KEYS = {
    'measure_length': 'MeasureLength[m]',
    'start_position': 'StartPosition[m]',
    'spatial_resolution': 'SpatialResolution[m]',
    'fibre_index': 'FibreIndex',
    'unit_calibration': 'Unit Calibration (nm)',
    'start_distance': 'Start Distance (m)',
    'stop_distance': 'Stop Distance (m)',
    'normalization': 'Normalization',
    'decimation_filter': 'Decimation Filter',
    'gauge_length': 'GaugeLength',
    'norm_offset': 'Norm Offset',
    'source_mode': 'Source Mode',
    'time_decimation': 'Time Decimation',
    'zero_offset': 'Zero Offset (m)',
    'p_parameter': 'P',
    'p_coefficients': 'P Coefficients',
    'idas_version': 'iDASVersion',
    'precice_sampling_freq': 'Precise Sampling Frequency (Hz)',
    'receiver_gain': 'Receiver Gain',
    'continuous_mode': 'Continuous Mode',
    'geo_lat': 'SystemInfomation.GPS.Latitude',
    'geo_lon': 'SystemInfomation.GPS.Longitude',
    'geo_elevation': 'SystemInfomation.GPS.Altitude',

    'channel': None,
    'unit': None
}


def get_meta(tdms_properties):
    prop = tdms_properties

    deltat = 1. / prop['SamplingFrequency[Hz]']
    tmin = prop['GPSTimeStamp'].timestamp()

    fibre_meta = {key: prop.get(key_map, -1)
                  for key, key_map in META_KEYS.items()
                  if key_map is not None}

    coeff = fibre_meta['p_coefficients']
    try:
        coeff = tuple(map(float, coeff.split('\t')))
    except ValueError:
        coeff = tuple(map(float, coeff.split(';')))
    fibre_meta['p_coefficients'] = coeff

    gain = fibre_meta['receiver_gain']
    try:
        gain = tuple(map(float, gain.split('\t')))
    except ValueError:
        gain = tuple(map(float, gain.split(';')))
    fibre_meta['receiver_gain'] = gain

    fibre_meta['unit'] = 'radians'

    return deltat, tmin, fibre_meta
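
# For illustration (placeholder filename): get_meta() takes the property dict
# returned by TdmsReader.get_properties() and yields the sampling interval,
# the start time and a flat metadata dict keyed as in META_KEYS.
#
#   props = TdmsReader('recording.tdms').get_properties()
#   deltat, tmin, meta = get_meta(props)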


def iload(filename, load_data=True):
    tdms = TdmsReader(filename)
    deltat, tmin, meta = get_meta(tdms.get_properties())

    data = tdms.get_data().T.copy() if load_data else None
    nsamples = tdms.channel_length

    for icha in range(tdms.n_channels):
        meta_cha = meta.copy()

        assert icha < 99999
        station = '%05i' % icha
        meta_cha['channel'] = icha

        tr = trace.Trace(
            network='DA',
            station=station,
            ydata=None,
            deltat=deltat,
            tmin=tmin,
            tmax=tmin + (nsamples - 1) * deltat,
            meta=meta_cha)

        if data is not None:
            tr.set_ydata(data[icha])

        yield tr


def detect(first512):
    return first512.startswith(b'TDSm.')
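

if __name__ == '__main__':
    # Minimal command-line sketch, not part of the module's API: assumes the
    # path to an iDAS TDMS file is passed as the first argument. The first
    # four bytes of such a file are the tag 'TDSm'; the fifth byte is the low
    # byte of the table-of-contents bitmask, and 0x2E ('.') is the pattern
    # detect() expects.
    import sys

    with open(sys.argv[1], 'rb') as f:
        print('iDAS TDMS file detected:', detect(f.read(512)))

    # Iterate over the per-channel traces without loading the samples:
    for tr in iload(sys.argv[1], load_data=False):
        print(tr.station, tr.deltat, tr.meta['channel'])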