source: anuga_work/development/Hinwood_2008/prepare_time_boundary.py @ 6823

1"""
2Script for running a breaking wave simulation of Jon Hinwoods wave tank.
3Note: this is based on the frinction_ua_flume_2006 structure.
4
5
6Duncan Gray, GA - 2007
7
8"""


#----------------------------------------------------------------------------
# Import necessary modules
#----------------------------------------------------------------------------

from Scientific.IO.NetCDF import NetCDFFile
from Numeric import array, zeros, Float
from os.path import join

from anuga.utilities.numerical_tools import ensure_numeric

from anuga.utilities.interp import interp


import project

# from os import getenv
# from os.path import join
# home = getenv('INUNDATIONHOME')
# Hinwood_dir = join(home,'data','flumes','Hinwood_2008')
# raw_data_dir = join(Hinwood_dir, 'raw_data')

def csv2tms(filename, offshore_bed_elevation):
    """
    Convert a Hinwood boundary file to a NetCDF tms file.

    filename is the name of the output tms file, eg 'hi.tsm'.
    There must be an equivalent .csv file, eg 'hi.csv'.
    """

    print 'Creating', filename

    # Read the ascii (.csv) version of this file
    fid = open(filename[:-4] + '.csv')
    lines = fid.readlines()
    fid.close()


    N = len(lines)
    T = zeros(N, Float)  # Time
    Q = zeros(N, Float)  # Stage
    X = zeros(N, Float)  # XMomentum
    Y = zeros(N, Float)  # YMomentum

    for i, line in enumerate(lines):
        fields = line.split(',')

        T[i] = float(fields[0])
        Q[i] = float(fields[1])
        depth = Q[i] - offshore_bed_elevation
        X[i] = float(fields[2]) * depth
        try:
            Y[i] = float(fields[3]) * depth
        except (IndexError, ValueError):
            # Some boundary files have no y-velocity column;
            # leave the y momentum as zero in that case.
            pass

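    # Worked example with made-up numbers: a stage of 0.05 m over an offshore
    # bed elevation of -0.45 m gives a depth of 0.5 m, so an x-velocity of
    # 0.2 m/s becomes an xmomentum of 0.2 * 0.5 = 0.1 m^2/s.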

    # Create tms NetCDF file
    fid = NetCDFFile(filename, 'w')
    fid.institution = 'Geoscience Australia'
    fid.description = 'Input wave for Benchmark 2'
    fid.starttime = 0.0
    fid.createDimension('number_of_timesteps', len(T))
    fid.createVariable('time', Float, ('number_of_timesteps',))
    fid.variables['time'][:] = T

    fid.createVariable('stage', Float, ('number_of_timesteps',))
    fid.variables['stage'][:] = Q[:]

    fid.createVariable('xmomentum', Float, ('number_of_timesteps',))
    fid.variables['xmomentum'][:] = X[:]

    fid.createVariable('ymomentum', Float, ('number_of_timesteps',))
    fid.variables['ymomentum'][:] = Y[:]

    fid.close()


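# A minimal, commented-out sketch of using csv2tms. The file name and bed
# elevation are made-up examples; the matching 'hi.csv' would hold comma
# separated rows of [time, stage, x-velocity, optional y-velocity], eg
#     0.0,0.05,0.2,0.0
#     0.1,0.06,0.25,0.0
# csv2tms('hi.tsm', -0.45)
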
def combine_velocity_depth(velocity_file, depth_file, out_file, metadata_dic):
    """
    Convert the raw velocity and depth records, which are sampled at
    different times, into a single csv file with values at the same
    times and in SI units.

    The depth values are interpolated to the times of the velocity values.

    The format of the velocity file is;
    [time, sec], [x-velocity +ve is towards the wave generator, cm/sec],
    [y-velocity], [z-velocity]

    The format of the pressure file is;
    [time, sec], [mm above SWL for sensor A], many other sensors...

    The format of the output file is;
    [time, sec], [m above SWL for sensor A],
    [x-velocity +ve is towards the shore, m/sec],
    [y-velocity +ve is towards the left of the tank, looking from the
    generator to the shore, m/sec]
    """
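    # Example input lines with made-up values: the velocity file is
    # whitespace separated, eg '12.30 4.1 0.2 0.0' (time, then velocities
    # in cm/sec), and the pressure file is whitespace separated, eg
    # '12.30 153.0 121.0 98.0' (time, then one column per sensor, in mm).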
    missing = 1e+20

    # Read velocity file
    #print "*********************"
    #print "velocity_file", velocity_file
    vfid = open(velocity_file)
    lines = vfid.readlines()
    vfid.close()


    n_velocity = len(lines)
    vtimes = zeros(n_velocity, Float)  # Time
    x_velocities = zeros(n_velocity, Float)
    y_velocities = zeros(n_velocity, Float)

    for i, line in enumerate(lines):
        fields = line.split()

        vtimes[i] = float(fields[0])
        x_velocities[i] = float(fields[1])
        y_velocities[i] = float(fields[2])

    # Read the depth file
    #print "depth_file", depth_file
    dfid = open(depth_file)
    lines = dfid.readlines()
    dfid.close()


    n_depth = len(lines)
    n_sensors = len(lines[0].split())
    dtimes = zeros(n_depth, Float)  # Time
    depths = zeros(n_depth, Float)
    sensors = zeros((n_depth, n_sensors), Float)

    for i, line in enumerate(lines):
        fields = line.split()
        fields = [float(j) for j in fields]
        dtimes[i] = fields[0]
        depths[i] = fields[1]
        sensors[i] = fields
    #print "dtimes", dtimes
    #print "depths", depths
    #print "vtimes", vtimes
    depths_at_vtimes = interp(depths, dtimes,
                              vtimes, missing=missing)
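    # Velocity times where the interpolated depth comes back as the
    # 'missing' marker are skipped when the combined boundary file is
    # written out below.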
    depths_at_vtimes = ensure_numeric(depths_at_vtimes)

    #print "len(dtimes)", len(vtimes)
    #print "len(depths_at_vtimes)", len(depths_at_vtimes)
    #print "metadata_dic['scenario_id']", metadata_dic['scenario_id']
#    for i in range(len(depths_at_vtimes)):
#        print "i", i
#        print "vtimes[i]", vtimes[i]
#        print "depths_at_vtimes[i]", depths_at_vtimes[i]

    #print "depths_at_vtimes",  depths_at_vtimes
    depths_at_vtimes = depths_at_vtimes/1000.00  # convert from mm to m
    missing = missing/1000.00  # Apply the same scaling to the missing marker
    x_velocities = ensure_numeric(x_velocities)
    # Reverse the axis direction and convert cm/sec to m/sec
    x_velocities = x_velocities * -0.01
    y_velocities = ensure_numeric(y_velocities)
    # Reverse the axis direction and convert cm/sec to m/sec
    y_velocities = y_velocities * -0.01

    fid = open(out_file, 'w')

    assert len(depths_at_vtimes) == len(vtimes)
    start_time = None
    #start_time = 0.0
    #for vtime, depth_at_vtime, velocity in map(vtimes, depths_at_vtimes,
    #                                           velocities):
    for i in xrange(len(vtimes)):
        if not depths_at_vtimes[i] == missing:
            # Make the times start at zero.
            if start_time is None:
                start_time = vtimes[i]
            final_time = vtimes[i] - start_time
            fid.write(str(final_time) \
                      + ',' + str(depths_at_vtimes[i]) \
                      + ',' + str(x_velocities[i]) \
                      + ',' + str(y_velocities[i]) + '\n')

    fid.close()
    print "The start time is", start_time
    if not start_time == metadata_dic['ANUGA_start_time']:
        raise ValueError("The calculated and recorded start times are different")
    # Modify the sensor array to reflect the new start time
    sensors[:,0] -= start_time

    # Since there is a new time reference, save the depth info using this
    # new reference.
    fid = open(depth_file[:-4] + '_exp_depth.csv', 'w')
    #print "depth_file", depth_file
    #print "start_time", start_time


    bed_elevation_list = metadata_dic['gauge_bed_elevation']
    # +2 due to the Time column and gauge A
    max_j = len(bed_elevation_list) + 2
    # Write a header
    fid.write('Time')
    for gauge_x in metadata_dic['gauge_x']:
        fid.write(',' + str(gauge_x))
    fid.write('\n')
    for i in xrange(len(dtimes)):
        fid.write(str(sensors[i,0]))  # Time
        # Don't write sensor A.
        # It is the boundary condition.
        # map(None, ...) pairs each sensor column with its bed elevation;
        # the two sequences have the same length.
        for j, bed_elevation in map(None,
                                    xrange(2, max_j),
                                    bed_elevation_list):
            # depth, m
            gauge = sensors[i,j]/1000 - bed_elevation  # Convert from mm to m
            fid.write(',' + str(gauge))
        fid.write('\n')
    fid.close()


    # Since there is a new time reference, save the stage info using this
    # new reference.
    fid = open(depth_file[:-4] + '_exp_stage.csv', 'w')

    # Write a header
    fid.write('Time')
    for gauge_x in metadata_dic['gauge_x']:
        fid.write(',' + str(gauge_x))
    fid.write('\n')
    for i in xrange(len(dtimes)):
        fid.write(str(sensors[i,0]))  # Time
        # Don't write sensor A.
        # It is the boundary condition.
        for j in xrange(2, max_j):
            # stage, m
            gauge = sensors[i,j]/1000  # Convert from mm to m
            fid.write(',' + str(gauge))
        fid.write('\n')
    fid.close()

    return final_time

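# A sketch of the files written above, with made-up gauge positions and
# values. The boundary csv holds [time, stage, x-velocity, y-velocity], eg
#     0.0,0.051,0.21,0.003
# The _exp_depth.csv and _exp_stage.csv files have a 'Time' header followed
# by one column per gauge x position, eg
#     Time,2.8,3.2,3.6
#     0.0,0.153,0.121,0.098
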
def prepare_time_boundary(metadata_dic, raw_data_dir, output_dir):
    """
    Use this if a project instance has already been created.
    """
    scenario_id = metadata_dic['scenario_id']
    velocity_file = join(raw_data_dir, scenario_id + 'velfilt.txt')
    depth_file = join(raw_data_dir, scenario_id + 'pressfilt.txt')
    out_file = join(output_dir, scenario_id + '_boundary.csv')
    #out_depth_file = join(output_dir, scenario_id+'_exp_depth.csv')

    final_time = combine_velocity_depth(velocity_file, depth_file, out_file,
                                        metadata_dic)
    #print "metadata_dic['xleft'][1]", metadata_dic['xleft'][1]
    if metadata_dic['xleft'][1] >= 0.0:
        # This should be a -ve value, since the still water level is the
        # z origin.
        print "Warning: The z origin seems incorrect."
    tsm_file = out_file[:-4] + '.tsm'
    csv2tms(tsm_file, metadata_dic['xleft'][1])
    return final_time

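# A minimal, commented-out sketch of calling prepare_time_boundary directly.
# The metadata entries and directory names below are made-up examples, not
# values from the Hinwood data set; the real dictionaries come from
# scenarios.py.
# metadata_dic = {'scenario_id': 'T1R5',
#                 'ANUGA_start_time': 12.3,
#                 'xleft': (0.0, -0.45),
#                 'gauge_bed_elevation': [-0.4, -0.35, -0.3],
#                 'gauge_x': [2.8, 3.2, 3.6]}
# prepare_time_boundary(metadata_dic, 'raw_data', 'boundaries')
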
# Don't do this, since run-dam changes the metadata_dic['xleft'][1],
# which is used by this function.
def prepare_time_boundary_for_scenarios_warning(scenarios,
                                                outputdir_tag):

    for run_data in scenarios:
        id = run_data['scenario_id']
        outputdir_name = id + outputdir_tag
        pro_instance = project.Project(['data','flumes','Hinwood_2008'],
                                       outputdir_name=outputdir_name)
        prepare_time_boundary(run_data,
                              pro_instance.raw_data_dir,
                              pro_instance.boundarydir)


#-------------------------------------------------------------------
if __name__ == "__main__":

    # Exit immediately; see the warning above
    # prepare_time_boundary_for_scenarios_warning.
    import sys; sys.exit()

    from scenarios import scenarios
    outputdir_tag = "XXXX"
    prepare_time_boundary_for_scenarios_warning(scenarios,
                                                outputdir_tag)