source: anuga_work/development/Hinwood_2008/prepare_time_boundary.py @ 5577

Last change on this file since 5577 was 5459, checked in by duncan, 16 years ago

Current Hinwood scenario

1"""
2Script for running a breaking wave simulation of Jon Hinwoods wave tank.
3Note: this is based on the frinction_ua_flume_2006 structure.
4
5
6Duncan Gray, GA - 2007
7
8"""
9
10
#----------------------------------------------------------------------------
# Import necessary modules
#----------------------------------------------------------------------------

from Scientific.IO.NetCDF import NetCDFFile
from Numeric import array, zeros, Float
from os.path import join

from anuga.utilities.numerical_tools import ensure_numeric

from interp import interp


import project

# from os import getenv
# from os.path import join
# home = getenv('INUNDATIONHOME')
# Hinwood_dir = join(home,'data','flumes','Hinwood_2008')
# raw_data_dir = join(Hinwood_dir, 'raw_data')

def csv2tms(filename, offshore_bed_elevation):
    """Convert a benchmark 2 time series to a NetCDF tms file.

    filename is the name of the output tms file, eg 'hi.tsm'.
    There must be an equivalent .csv file, eg 'hi.csv'.
    """

    print 'Creating', filename

    # Read the ascii (.csv) version of this file
    fid = open(filename[:-4] + '.csv')

    # Read all lines
    lines = fid.readlines()
    fid.close()

    N = len(lines)
    T = zeros(N, Float)  # Time
    Q = zeros(N, Float)  # Stage
    X = zeros(N, Float)  # XMomentum
    Y = zeros(N, Float)  # YMomentum

    for i, line in enumerate(lines):
        fields = line.split(',')

        T[i] = float(fields[0])
        Q[i] = float(fields[1])
        # Momentum is velocity * depth, with depth measured from the
        # offshore bed elevation up to the stage.
        depth = Q[i] - offshore_bed_elevation
        X[i] = float(fields[2]) * depth
        try:
            Y[i] = float(fields[3]) * depth
        except (IndexError, ValueError):
            # No usable y-velocity column; leave Y[i] as zero
            pass


    # Create tms NetCDF file
    fid = NetCDFFile(filename, 'w')
    fid.institution = 'Geoscience Australia'
    fid.description = 'Input wave for Benchmark 2'
    fid.starttime = 0.0
    fid.createDimension('number_of_timesteps', len(T))
    fid.createVariable('time', Float, ('number_of_timesteps',))
    fid.variables['time'][:] = T

    fid.createVariable('stage', Float, ('number_of_timesteps',))
    fid.variables['stage'][:] = Q[:]

    fid.createVariable('xmomentum', Float, ('number_of_timesteps',))
    fid.variables['xmomentum'][:] = X[:]

    fid.createVariable('ymomentum', Float, ('number_of_timesteps',))
    fid.variables['ymomentum'][:] = Y[:]

    fid.close()


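# A minimal usage sketch (the file names and the bed elevation below are
# hypothetical): given a 'T1_boundary.csv' with comma separated time, stage,
# x-velocity and (optionally) y-velocity columns, the call would write
# 'T1_boundary.tsm' next to it.  The resulting .tsm is the kind of time
# series a file-based ANUGA boundary condition (e.g. File_boundary) can
# read, though the exact import path depends on the ANUGA version in use.
#
#     csv2tms('T1_boundary.tsm', offshore_bed_elevation=-0.4)
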
def combine_velocity_depth(velocity_file, depth_file, out_file, metadata_dic):
    """
    Convert the raw velocity and depth files, which record values at
    different times, into a single csv file with values at common times
    and in SI units.

    The depth values are interpolated onto the times of the velocity values.

    The format of the velocity file is:
    [time, sec], [x-velocity, +ve is towards the wave generator, cm/sec],
    [y-velocity], [z-velocity]

    The format of the pressure file is:
    [time, sec], [mm above SWL for sensor A], many other sensors...

    The format of the output file is:
    [time, sec], [m above SWL for sensor A],
    [x-velocity, +ve is towards the shore, m/sec],
    [y-velocity, +ve is towards the left of the tank, looking from the
    generator to the shore, m/sec]
    """
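    # Summary of the column mapping described in the docstring above
    # (no new information):
    #   velocity file : time [s], u [cm/s, +ve towards generator], v, w
    #   pressure file : time [s], gauge A [mm above SWL], other gauges...
    #   output csv    : time [s], gauge A [m above SWL],
    #                   u [m/s, +ve towards shore], v [m/s, +ve to the left]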
    missing = 1e+20

    # Read velocity file
    #print "*********************"
    #print "velocity_file", velocity_file
    vfid = open(velocity_file)
    lines = vfid.readlines()
    vfid.close()

    n_velocity = len(lines)
    vtimes = zeros(n_velocity, Float)  # Time
    x_velocities = zeros(n_velocity, Float)
    y_velocities = zeros(n_velocity, Float)

    for i, line in enumerate(lines):
        fields = line.split()  # whitespace separated

        vtimes[i] = float(fields[0])
        x_velocities[i] = float(fields[1])
        y_velocities[i] = float(fields[2])

    # Read the depth file
    #print "depth_file", depth_file
    dfid = open(depth_file)
    lines = dfid.readlines()
    dfid.close()

    n_depth = len(lines)
    n_sensors = len(lines[0].split())
    dtimes = zeros(n_depth, Float)  # Time
    depths = zeros(n_depth, Float)
    sensors = zeros((n_depth, n_sensors), Float)

    for i, line in enumerate(lines):
        fields = line.split()  # whitespace separated
        fields = [float(j) for j in fields]
        dtimes[i] = fields[0]
        depths[i] = fields[1]
        sensors[i] = fields
    #print "dtimes", dtimes
    #print "depths", depths
    #print "vtimes", vtimes

    # Interpolate the gauge A depths from the depth-file times onto the
    # velocity-file times; entries that cannot be interpolated are set to
    # the 'missing' marker.
    depths_at_vtimes = interp(depths, dtimes,
                              vtimes, missing=missing)
    depths_at_vtimes = ensure_numeric(depths_at_vtimes)

    #print "len(dtimes)", len(vtimes)
    #print "len(depths_at_vtimes)", len(depths_at_vtimes)
    #print "metadata_dic['scenario_id']", metadata_dic['scenario_id']
#    for i in range(len(depths_at_vtimes)):
#         print "i", i
#         print "vtimes[i]", vtimes[i]
#         print "depths_at_vtimes[i]", depths_at_vtimes[i]

    #print "depths_at_vtimes",  depths_at_vtimes
    depths_at_vtimes = depths_at_vtimes/1000.00  # convert from mm to m
    missing = missing/1000.00  # scale missing the same way as depths_at_vtimes
    x_velocities = ensure_numeric(x_velocities)
    # Flip the sign (tank axis convention) and convert cm/sec to m/sec
    x_velocities = x_velocities * -0.01
    y_velocities = ensure_numeric(y_velocities)
    # Flip the sign (tank axis convention) and convert cm/sec to m/sec
    y_velocities = y_velocities * -0.01
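    # Worked example of the conversion above (the value is illustrative only):
    # a probe reading of +5 cm/sec, i.e. towards the wave generator, becomes
    # 5 * -0.01 = -0.05 m/sec in the shore-positive convention of the output.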

    fid = open(out_file, 'w')

    assert len(depths_at_vtimes) == len(vtimes)
    start_time = None
    #start_time = 0.0
    #for vtime, depth_at_vtime, velocity in map(vtimes, depths_at_vtimes,
    #                                           velocities):
    for i in xrange(len(vtimes)):
        if depths_at_vtimes[i] != missing:
            # Make the times start at zero.
            if start_time is None:
                start_time = vtimes[i]
            final_time = vtimes[i] - start_time
            fid.write(str(final_time) \
                      + ',' + str(depths_at_vtimes[i]) \
                      + ',' + str(x_velocities[i]) \
                      + ',' + str(y_velocities[i]) + '\n')

    fid.close()
    print "The start time is", start_time
    if start_time != metadata_dic['ANUGA_start_time']:
        raise ValueError("The calculated and recorded start times are different")
    # Modify the sensor array to reflect the new start time
    sensors[:,0] -= start_time

    # Since there is a new time reference, save the depth info using this
    # new reference.
    fid = open(depth_file[:-4] + '_exp_depth.csv', 'w')
    #print "depth_file", depth_file
    #print "start_time", start_time

    bed_elevation_list = metadata_dic['gauge_bed_elevation']
    # +2 due to the Time column and gauge A
    max_j = len(bed_elevation_list) + 2
    # Write a header
    fid.write('Time')
    for gauge_x in metadata_dic['gauge_x']:
        fid.write(',' + str(gauge_x))
    fid.write('\n')
    for i in xrange(len(dtimes)):
        fid.write(str(sensors[i,0])) # Time
        # Don't write sensor A.
        # It is the boundary condition.
        # map(None, ...) pairs each sensor column index with its bed
        # elevation (like zip, padding the shorter sequence with None).
        for j, bed_elevation in map(None,
                                    xrange(2,max_j),
                                    bed_elevation_list):
            # depth, m
            gauge = sensors[i,j]/1000 - bed_elevation # Convert from mm to m
            fid.write(',' + str(gauge))
        fid.write('\n')
    fid.close()


    # Since there is a new time reference, save the stage info using this
    # new reference.
    fid = open(depth_file[:-4] + '_exp_stage.csv', 'w')

    # Write a header
    fid.write('Time')
    for gauge_x in metadata_dic['gauge_x']:
        fid.write(',' + str(gauge_x))
    fid.write('\n')
    for i in xrange(len(dtimes)):
        fid.write(str(sensors[i,0])) # Time
        # Don't write sensor A.
        # It is the boundary condition.
        for j in xrange(2,max_j):
            # stage, m
            gauge = sensors[i,j]/1000 # Convert from mm to m
            fid.write(',' + str(gauge))
        fid.write('\n')
    fid.close()

    return final_time

def prepare_time_boundary(metadata_dic, raw_data_dir, output_dir):
    """
    Use this if a project instance has already been created.
    """
    scenario_id = metadata_dic['scenario_id']
    velocity_file = join(raw_data_dir, scenario_id + 'velfilt.txt')
    depth_file = join(raw_data_dir, scenario_id + 'pressfilt.txt')
    out_file = join(output_dir, scenario_id + '_boundary.csv')
    #out_depth_file = join(output_dir, scenario_id+'_exp_depth.csv')

    final_time = combine_velocity_depth(velocity_file, depth_file, out_file,
                                        metadata_dic)
    #print "metadata_dic['xleft'][1]", metadata_dic['xleft'][1]
    if metadata_dic['xleft'][1] >= 0.0:
        # This should be a -ve value, since the still water level is the
        # z origin.
        print "Warning: The z origin seems incorrect."
    tsm_file = out_file[:-4] + '.tsm'
    csv2tms(tsm_file, metadata_dic['xleft'][1])
    return final_time
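
# A minimal sketch of driving prepare_time_boundary directly.  The dictionary
# keys are the ones used above; every value (and both directory names) is
# purely hypothetical.
#
#     run_data = {'scenario_id': 'T1',              # hypothetical id
#                 'ANUGA_start_time': 0.0,          # must equal the first kept velocity time
#                 'xleft': [0.0, -0.4],             # [x, z]; z is relative to SWL, so -ve
#                 'gauge_x': [4.0, 6.0],            # gauge positions along the tank, m
#                 'gauge_bed_elevation': [-0.3, -0.2]}
#     prepare_time_boundary(run_data, 'raw_data', 'boundaries')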

# Don't do this, since run-dam changes the metadata_dic['xleft'][1],
# which is used by this function.
def prepare_time_boundary_for_scenarios_warning(scenarios,
                                                outputdir_tag):

    for run_data in scenarios:
        id = run_data['scenario_id']
        outputdir_name = id + outputdir_tag
        pro_instance = project.Project(['data','flumes','Hinwood_2008'],
                                       outputdir_name=outputdir_name)
        prepare_time_boundary(run_data,
                              pro_instance.raw_data_dir,
                              pro_instance.boundarydir)


#-------------------------------------------------------------------
if __name__ == "__main__":

    # Disabled: exit immediately (see the warning above
    # prepare_time_boundary_for_scenarios_warning).
    import sys; sys.exit()

    from scenarios import scenarios
    outputdir_tag = "XXXX"
    prepare_time_boundary_for_scenarios_warning(scenarios,
                                                outputdir_tag)