import numpy as num

from pyrocko import gf, trace
from pyrocko.guts import Object, Float, StringChoice, List, String
from pyrocko.gui import marker

from grond.meta import store_t


guts_prefix = 'grond'

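# Pole-zero responses used to simulate recordings of classical instruments
# (WWSSN long-period and Wood-Anderson); they can be selected by name through
# ``NamedResponse`` below.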

response_wwssn_lp = trace.PoleZeroResponse(
    poles=[
        -0.2513 + 0.3351J,
        -0.2513 - 0.3351J,
        -0.0628 + 0.0304J,
        -0.0628 - 0.0304J],
    zeros=[0., 0., 0.],
    constant=383.6)


response_wwssn_lp_2 = trace.PoleZeroResponse(
    poles=[
        -0.40180 + 0.08559J,
        -0.40180 - 0.08559J,
        -0.04841,
        -0.08816],
    zeros=[0., 0., 0.])

response_wa = trace.PoleZeroResponse(
    poles=[
        -5.49779 - 5.60886J,
        -5.49779 + 5.60886J],
    zeros=[0., 0.])


class NamedResponse(StringChoice):
    choices = [
        'wood-anderson',
        'wwssn-lp']

    map = {
        'wood-anderson': response_wa,
        'wwssn-lp': response_wwssn_lp}

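# Simple one-parameter approximation of a Brune-type source spectrum: flat
# below the corner frequency, falling off as 1/f**2 above it, with the corner
# frequency tied to the inverse of ``duration``.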

class BruneResponse(trace.FrequencyResponse):

    duration = Float.T()

    def evaluate(self, freqs):
        return 1.0 / (1.0 + (freqs*self.duration)**2)

# from pyrocko.plot import response as pr
# pr.plot([response_wwssn_lp, response_wwssn_lp_2, response_wa])

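# Feature measurement methods: 'peak_component' takes the largest absolute
# amplitude found on any single component, 'peak_to_peak_component' the
# largest peak-to-peak amplitude, 'peak_absolute_vector' the peak of the
# vector length of all components combined, and 'spectral_average' the mean
# spectral amplitude between ``fmin`` and ``fmax``.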

class FeatureMethod(StringChoice):
    choices = [
        'peak_component',
        'peak_to_peak_component',
        'peak_absolute_vector',
        'spectral_average']


class WaveformQuantity(StringChoice):
    choices = [
        'displacement',
        'velocity',
        'acceleration']


class FeatureMeasurementFailed(Exception):
    pass

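# Configuration and extraction of a single scalar waveform feature: traces are
# cut to a window defined by the two timing definitions, optionally filtered
# and converted to the requested quantity, and then reduced to one amplitude
# value according to ``method``.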

class FeatureMeasure(Object):
    name = String.T()
    timing_tmin = gf.Timing.T(default='vel:8')
    timing_tmax = gf.Timing.T(default='vel:2')
    fmin = Float.T(optional=True)
    fmax = Float.T(optional=True)
    response = trace.FrequencyResponse.T(optional=True)
    named_response = NamedResponse.T(optional=True)
    channels = List.T(String.T())
    quantity = WaveformQuantity.T(default='displacement')
    method = FeatureMethod.T(default='peak_component')

    def get_nmodelling_targets(self):
        return len(self.channels)

    def get_modelling_targets(
            self, codes, lat, lon, depth, store_id, backazimuth):

        mtargets = []
        for channel in self.channels:
            target = gf.Target(
                quantity='displacement',
                codes=codes + (channel, ),
                lat=lat,
                lon=lon,
                depth=depth,
                store_id=store_id)

            if channel == 'R':
                target.azimuth = backazimuth - 180.
                target.dip = 0.
            elif channel == 'T':
                target.azimuth = backazimuth - 90.
                target.dip = 0.
            elif channel == 'Z':
                target.azimuth = 0.
                target.dip = -90.

            mtargets.append(target)

        return mtargets

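    # Measure the feature either on observed data (via ``dataset``) or on
    # synthetic traces (via ``trs``). Returns the scalar amplitude and, in
    # debug mode, also the processed traces and pick markers.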

    def evaluate(
            self, engine, source, targets,
            dataset=None,
            trs=None,
            extra_responses=[],
            debug=False):

        from ..waveform import target as base

        trs_processed = []
        trs_orig = []
        for itarget, target in enumerate(targets):
            if target.codes[-1] not in self.channels:
                continue

            store = engine.get_store(target.store_id)

            tmin = source.time + store_t(
                store, self.timing_tmin, source, target)
            tmax = source.time + store_t(
                store, self.timing_tmax, source, target)

            if self.fmin is not None and self.fmax is not None:
                freqlimits = [
                    self.fmin/2.,
                    self.fmin,
                    self.fmax,
                    self.fmax*2.]
                tfade = 1./self.fmin

            else:
                freqlimits = None
                tfade = 0.0

            if dataset is not None:
                bazi = base.backazimuth_for_waveform(
                    target.azimuth, target.codes)

                tr = dataset.get_waveform(
                    target.codes,
                    tinc_cache=1.0/self.fmin,
                    quantity=self.quantity,
                    tmin=tmin,
                    tmax=tmax,
                    freqlimits=freqlimits,
                    tfade=tfade,
                    deltat=store.config.deltat,
                    cache=True,
                    backazimuth=bazi)

            else:
                tr = trs[itarget]

                tr.extend(
                    tmin - tfade,
                    tmax + tfade,
                    fillmethod='repeat')

                tr = tr.transfer(
                    freqlimits=freqlimits,
                    tfade=tfade)

            trs_orig.append(tr)

            tr = tr.copy()

            responses = []
            responses.extend(extra_responses)

            ndiff = \
                WaveformQuantity.choices.index(self.quantity) - \
                WaveformQuantity.choices.index(target.quantity)

            if ndiff > 0:
                responses.append(trace.DifferentiationResponse(ndiff))

            if ndiff < 0:
                responses.append(trace.IntegrationResponse(-ndiff))

            if self.response:
                responses.append(self.response)

            if self.named_response:
                responses.append(
                    NamedResponse.map[self.named_response])

            if responses:
                trans = trace.MultiplyResponse(responses)
                try:
                    tr = tr.transfer(transfer_function=trans)

                except trace.TraceTooShort:
                    raise FeatureMeasurementFailed(
                        'transfer: trace too short')

            if tmin is None or tmax is None:
                raise FeatureMeasurementFailed(
                    'timing determination failed (phase unavailable?)')

            tr.chop(tmin, tmax)

            tr.set_location(tr.location + '-' + self.name + '-proc')
            trs_processed.append(tr)

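        # Reduce the processed traces to a single scalar amplitude according
        # to the configured measurement method.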

        markers = []
        marker_candidates = []
        if self.method in ['peak_component', 'peak_to_peak_component']:
            component_amp_maxs = []
            for tr in trs_processed:
                y = tr.get_ydata()
                if self.method == 'peak_component':
                    yabs = num.abs(y)
                    i_at_amax = num.argmax(yabs)
                    amax = yabs[i_at_amax]
                    if debug:
                        t_at_amax = tr.tmin + i_at_amax * tr.deltat
                        mark = marker.Marker(
                            [tr.nslc_id],
                            t_at_amax,
                            t_at_amax,
                            0)

                        marker_candidates.append(mark)

                    component_amp_maxs.append(amax)
                else:
                    i_at_amax = num.argmax(y)
                    i_at_amin = num.argmin(y)
                    amax = y[i_at_amax]
                    amin = y[i_at_amin]
                    if debug:
                        t_at_amax = tr.tmin + i_at_amax * tr.deltat
                        t_at_amin = tr.tmin + i_at_amin * tr.deltat
                        ts = sorted([t_at_amax, t_at_amin])
                        mark = marker.Marker(
                            [tr.nslc_id],
                            ts[0],
                            ts[1],
                            0)

                        marker_candidates.append(mark)

                    component_amp_maxs.append(amax - amin)

            i_at_amax = num.argmax(component_amp_maxs)
            if debug:
                markers.append(marker_candidates[i_at_amax])
            amp_max = component_amp_maxs[i_at_amax]

        elif self.method == 'peak_absolute_vector':
            trsum = None
            for tr in trs_processed:
                tr.set_ydata(tr.get_ydata()**2)
                if trsum is None:
                    trsum = tr
                else:
                    trsum.add(tr)

            # vector length: square root of the summed squared components
            trsum.set_ydata(num.sqrt(trsum.get_ydata()))
            trsum.set_codes(channel='SUM')

            yabs = trsum.get_ydata()

            i_at_amax = num.argmax(yabs)
            amax = yabs[i_at_amax]
            t_at_amax = trsum.tmin + i_at_amax * trsum.deltat
            amp_max = amax

            if debug:
                markers.append(marker.Marker(
                    [trsum.nslc_id],
                    t_at_amax,
                    t_at_amax,
                    0))

            trs_processed.append(trsum)

        elif self.method == 'spectral_average':
            component_amp_maxs = []
            for tr in trs_processed:
                freqs, values = tr.spectrum()
                component_amp_maxs.append(num.mean(num.abs(values[
                    num.logical_and(self.fmin <= freqs, freqs <= self.fmax)])))

            amp_max = num.mean(component_amp_maxs)

        if debug:
            trs_out = []
            for tr in trs_orig:
                tr_out = tr.copy()
                tr_out.set_location(tr_out.location + '-' + self.name)
                trs_out.append(tr_out)

            return amp_max, (trs_out + trs_processed, markers)

        return amp_max, None
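

# Sketch of how a ``FeatureMeasure`` might be configured and evaluated on
# synthetic traces. Kept commented out, like the response plot above; it is
# only an illustration, not part of the module: 'crust2_ib' is a placeholder
# store id and the engine, source and station coordinates would come from the
# surrounding grond setup.
#
# engine = gf.LocalEngine(use_config=True)
# source = gf.DCSource(lat=54., lon=12., depth=10e3, magnitude=5.0)
#
# measure = FeatureMeasure(
#     name='pmax',
#     fmin=0.02,
#     fmax=0.1,
#     channels=['Z', 'R', 'T'],
#     method='peak_component')
#
# targets = measure.get_modelling_targets(
#     codes=('', 'STA', ''), lat=55., lon=13., depth=0.,
#     store_id='crust2_ib', backazimuth=90.)
#
# response = engine.process(source, targets)
# amp_max, _ = measure.evaluate(
#     engine, source, targets, trs=response.pyrocko_traces())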