source: branches/anuga_1_1/anuga_work/production/bunbury_storm_surge_2009/process_gems_grids.py @ 7794

Last change on this file was r7794, checked in by sexton, 15 years ago:

updates to bunbury scripts

File size: 9.3 KB
# ---------------------------------------------------------------------------
# This Python script reads in GEMSURGE outputs and writes them into a master
# STS file that contains all the data across the entire model domain at all
# timesteps. It requires a vertically flipped elevation ASCII grid, which is
# merged with the GEMSURGE data to calculate the required quantities of
# xmomentum and ymomentum.
# Written by Nariman Habili and Leharne Fountain, 2010
# ---------------------------------------------------------------------------

import glob
import os
import gzip
import numpy

from Scientific.IO.NetCDF import NetCDFFile
from anuga.coordinate_transforms.geo_reference import Geo_reference, write_NetCDF_georeference

#------------------
# Directory setup
#------------------
state = 'western_australia'
scenario_folder = 'bunbury_storm_surge_scenario_2009'
event = '20100527_gcom_12min'

print "Processing event: ", event

ENV_INUNDATIONHOME = 'INUNDATIONHOME'
home = os.path.join(os.getenv(ENV_INUNDATIONHOME), 'data') # Absolute path for data folder
# GEMS_folder = os.path.join(home, state, scenario_folder, 'GEMS')
anuga_folder = os.path.join(home, state, scenario_folder, 'anuga')
boundaries_folder = os.path.join(anuga_folder, 'boundaries')
event_folder = os.path.join(boundaries_folder, event)

# input_dir = os.path.join(GEMS_folder, event)

#-----------------------
# Input files from GEMS
#-----------------------
elevation_file_name = os.path.join(event_folder, 'buntopog_20m_flip.asc')  # Name of the vertically flipped elevation grid

print "Elevation file name: ", elevation_file_name

grid_file_names = glob.glob(os.path.join(event_folder, 'gcom*.gz'))
grid_file_names.sort()
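# Note: sort() is lexicographic, so this assumes the gcom*.gz file names
# are zero-padded such that alphabetical order is also chronological order.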

# This was to subsample the gz files:
#grid_file_names_temp = []
#for i in range(1, 32, 1):
#    grid_file_names_temp.append(grid_file_names[i])
#grid_file_names = grid_file_names_temp

number_of_timesteps = len(grid_file_names)

print "Number of timesteps: ", number_of_timesteps

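# The 720 s timestep matches the 12 minute output interval implied by the
# '12min' suffix in the event name.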
start_time = 0      # all times in seconds
timestep = 720      # all times in seconds
end_time = start_time + (timestep*number_of_timesteps) # all times in seconds
refzone = 50        # UTM zone of model
event_sts = os.path.join(event_folder, event)

elevation = []

elevation_file = open(elevation_file_name, 'rb')
lines = elevation_file.readlines()
elevation_file.close()

# Strip off the ASCII grid header (ncols, nrows, x_origin, y_origin,
# grid_size and the no_data value), then read in the elevation grid:
for i, L in enumerate(lines):
    if i == 0:
        ncols = int(L.strip().split()[1])
    elif i == 1:
        nrows = int(L.strip().split()[1])
    elif i == 2:
        x_origin = int(L.strip().split()[1])
    elif i == 3:
        y_origin = int(L.strip().split()[1])
    elif i == 4:
        grid_size = int(L.strip().split()[1])
    elif i == 5:
        no_data = int(float(L.strip().split()[1]))
    else:
        elevation += L.strip().split()

print 'Number of columns: ', ncols
print 'Number of rows: ', nrows
print 'X origin: ', x_origin
print 'Y origin: ', y_origin
print 'Grid size: ', grid_size
print 'No data value: ', no_data

#------------------------------------------------------------
# Create arrays of elevation, depth, current_x and current_y
#------------------------------------------------------------
print 'Creating numpy arrays: elevation, depth, current_x, current_y'

depth = numpy.empty(number_of_timesteps*nrows*ncols, dtype=float)
current_x = numpy.empty(number_of_timesteps*nrows*ncols, dtype=float)
current_y = numpy.empty(number_of_timesteps*nrows*ncols, dtype=float)

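# Each gzipped GEMS grid file is assumed to hold six ASCII blocks of nrows
# lines each, separated by short headers: depth is the fourth block,
# current_x the fifth and current_y the sixth (hence the offsets below) -
# always check the format if the GEMS output layout changes.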
for j, f in enumerate(grid_file_names):
    print 'file: ', f

    gz_file = gzip.open(f, 'rb')

    d = []
    cx = []
    cy = []

    for i, L in enumerate(gz_file):

        #if i > 7 and i < (8 + nrows): # 7 refers to the number of rows of header info that we skip - always CHECK format
        #    print 'first block'

        #if i > (9 + nrows) and i < (10 + 2*nrows):
        #    print 'second block'

        #if i > (11 + 2*nrows):
        #    print 'third block'

        if i > (13 + 3*nrows) and i < (14 + 4*nrows):
            d += L.strip().split()

        if i > (15 + 4*nrows) and i < (16 + 5*nrows):
            cx += L.strip().split()

        if i > (17 + 5*nrows):
            cy += L.strip().split()

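    # Note: numpy.flipud on a flattened 1-D array reverses the whole element
    # order (not just the rows), presumably to match the vertically flipped
    # elevation grid read in above.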
    depth[j*nrows*ncols : (j+1)*nrows*ncols] = numpy.flipud(numpy.array(d).astype('d'))
    current_x[j*nrows*ncols : (j+1)*nrows*ncols] = numpy.flipud(numpy.array(cx).astype('d'))
    current_y[j*nrows*ncols : (j+1)*nrows*ncols] = numpy.flipud(numpy.array(cy).astype('d'))

    gz_file.close()

elevation = numpy.array(elevation).astype('d')

print 'Size of elevation array: ', elevation.size
print 'Size of depth array: ', depth.size
print 'Size of current x array: ', current_x.size
print 'Size of current y array: ', current_y.size

assert depth.size == current_x.size == current_y.size == ncols * nrows * number_of_timesteps
assert depth.size == number_of_timesteps * elevation.size

number_of_points = ncols * nrows

# ---------------------------------------------------------------------------
# Create a mask of no_data values across depth, current_x and current_y to
# ensure all three quantities have no_data values in corresponding cells -
# this is a work-around until GEMS can produce a consistent dataset.
# ---------------------------------------------------------------------------

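# Values below -9000 are treated as the -9999 no-data sentinel; adding the
# boolean arrays acts as an elementwise logical OR across the three grids.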
no_value_index = numpy.where(((depth < -9000) + (current_x < -9000) + (current_y < -9000)) == True)[0]

numpy.put(current_x, no_value_index, -9999)
numpy.put(current_y, no_value_index, -9999)
numpy.put(depth, no_value_index, 0)

# Taking the absolute value is to account for negative depths obtained when
# depth - elevation is slightly negative - possibly a rounding error?
#momentum = numpy.absolute(depth * current_x)
print 'Calculating stage, xmomentum and ymomentum'

#stage = depth - numpy.tile(numpy.absolute(elevation), number_of_timesteps)
stage = depth + numpy.tile(elevation, number_of_timesteps)
xmomentum = current_x*depth  #momentum*numpy.sin(numpy.radians(current_y))
ymomentum = current_y*depth  #momentum*numpy.cos(numpy.radians(current_y))

# Flag the no-data cells after stage has been computed; doing it earlier
# would be overwritten by the assignment above.
numpy.put(stage, no_value_index, -9999)
numpy.put(xmomentum, no_value_index, 0)
numpy.put(ymomentum, no_value_index, 0)

#assert len(numpy.where((numpy.sqrt(xmomentum**2 + ymomentum**2) - momentum) > 1.e-06)[0]) == 0

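# Build the coordinate arrays with integer arithmetic (scaled by 10000)
# and divide back down, avoiding accumulated floating-point error in
# numpy.arange across thousands of grid cells.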
x_origin_int = int(10000*x_origin)
y_origin_int = int(10000*y_origin)
grid_size_int = int(10000*grid_size)

x = numpy.tile(numpy.arange(x_origin_int, (x_origin_int + ncols * grid_size_int), grid_size_int)/10000.0, nrows)
y = numpy.repeat(numpy.arange(y_origin_int, (y_origin_int + nrows * grid_size_int), grid_size_int)/10000.0, ncols)

assert x.size == y.size == number_of_points

time = numpy.arange(start_time, end_time, timestep, dtype='i')

assert time.size == number_of_timesteps
assert xmomentum.size == depth.size == ymomentum.size

# -----------------------------
# Create the STS file
# -----------------------------
print "Creating the STS NetCDF file"

#for j in range(2):

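# Mode 'wl' opens the file for writing; the trailing 'l' is assumed to
# request NetCDF large-file (64-bit offset) support in the ScientificPython
# build used with ANUGA - check your installation.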
fid = NetCDFFile(os.path.join(event_folder, event + '_master_2_1.sts'), 'wl')
fid.institution = 'Geoscience Australia'
fid.description = "description"
fid.starttime = 0.0
fid.ncols = ncols
fid.nrows = nrows
fid.grid_size = grid_size
fid.no_data = no_data
fid.createDimension('number_of_points', number_of_points)
fid.createDimension('number_of_timesteps', number_of_timesteps)
fid.createDimension('numbers_in_range', 2)

fid.createVariable('x', 'd', ('number_of_points',))
fid.createVariable('y', 'd', ('number_of_points',))
fid.createVariable('elevation', 'd', ('number_of_points',))
fid.createVariable('elevation_range', 'd', ('numbers_in_range',))
fid.createVariable('time', 'i', ('number_of_timesteps',))
fid.createVariable('stage', 'd', ('number_of_timesteps', 'number_of_points'))
fid.createVariable('stage_range', 'd', ('numbers_in_range',))
fid.createVariable('xmomentum', 'd', ('number_of_timesteps', 'number_of_points'))
fid.createVariable('xmomentum_range', 'd', ('numbers_in_range',))
fid.createVariable('ymomentum', 'd', ('number_of_timesteps', 'number_of_points'))
fid.createVariable('ymomentum_range', 'd', ('numbers_in_range',))

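# Initialise the *_range variables to an inverted [min, max] pair so that
# any later min/max sweep will overwrite them with the true extrema.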
fid.variables['elevation_range'][:] = numpy.array([1e+036, -1e+036])
fid.variables['stage_range'][:] = numpy.array([1e+036, -1e+036])
fid.variables['xmomentum_range'][:] = numpy.array([1e+036, -1e+036])
fid.variables['ymomentum_range'][:] = numpy.array([1e+036, -1e+036])
fid.variables['elevation'][:] = elevation
fid.variables['time'][:] = time  #[j*60 : j*60 + 60]

s = fid.variables['stage']
xm = fid.variables['xmomentum']
ym = fid.variables['ymomentum']

#jj = j*number_of_points*(number_of_timesteps/2)

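# Write the flat arrays into the NetCDF variables one timestep at a time;
# each record is a slice of number_of_points values.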
for i in xrange(number_of_timesteps):
    ii = i*number_of_points  # + jj
    s[i] = stage[ii : ii + number_of_points]
    xm[i] = xmomentum[ii : ii + number_of_points]
    ym[i] = ymomentum[ii : ii + number_of_points]

origin = Geo_reference(refzone, min(x), min(y)) # Check this section for inputs in eastings and northings - it works for long and lat
geo_ref = write_NetCDF_georeference(origin, fid)

fid.variables['x'][:] = x - geo_ref.get_xllcorner()
fid.variables['y'][:] = y - geo_ref.get_yllcorner()

fid.close()