Changeset 7794


Ignore:
Timestamp:
Jun 7, 2010, 9:38:23 AM (10 years ago)
Author:
sexton
Message:

updates to bunbury scripts

Location:
branches/anuga_1_1/anuga_work/production/bunbury_storm_surge_2009
Files:
2 edited

Legend:

Unmodified
Added
Removed
  • branches/anuga_1_1/anuga_work/production/bunbury_storm_surge_2009/build_gems_boundary.py

    r7678 r7794  
    1313import sys
    1414# sys.path.append('W:\\sandpits\\lfountain\\anuga_work\\production\\mandurah_storm_surge_2009\\gems_comparison')
    15 sys.path.append('/nas/gemd/georisk_models/inundation/sandpits/lfountain/anuga_work/production/mandurah_storm_surge_2009/gems_comparison')
    16 import project_GEMS_grid
     15#sys.path.append('/nas/gemd/georisk_models/inundation/sandpits/lfountain/anuga_work/production/mandurah_storm_surge_2009/gems_comparison')
     16import project
    1717from Scientific.IO.NetCDF import NetCDFFile
    1818from anuga.coordinate_transforms.geo_reference import Geo_reference, write_NetCDF_georeference
     
    2323state = 'western_australia'
    2424scenario_folder = 'bunbury_storm_surge_scenario_2009'
    25 event = 'case_1' # Baseline TC Alby event
     25event = '20100527_gcom_12min' # Baseline TC Alby event
    2626
    2727ENV_INUNDATIONHOME = 'INUNDATIONHOME'
     
    3535# Define boundary points to extract from master STS file
    3636#--------------------------------------------------------
    37 gems_boundary = numpy.array(read_polygon(project_GEMS_grid.gems_order))
     37gems_boundary = numpy.array(read_polygon(project.gems_order))
     38#gems_boundary = numpy.array(read_polygon(project.gauges))
    3839number_of_points = gems_boundary.shape[0]
     40print 'hello', number_of_points
     41print 'points', gems_boundary
    3942
    4043#---------------------------------------------------------
     
    4245#---------------------------------------------------------
    4346print "Reading master NetCDF file"
    44 infile = NetCDFFile(os.path.join(event_folder, event + '_master.sts'), 'r')
     47infile = NetCDFFile(os.path.join(event_folder, event + '_master_2_1.sts'), 'rl') #rl for large netcdf files
    4548
    4649grid_size = infile.grid_size
     
    6972origin = numpy.array([x_origin[0], y_origin[0]])
    7073index_array_2D = ((gems_boundary - origin)/grid_size).astype(int)
    71 assert index_array_2D[:,0].min() >= 0 and index_array_2D[:,0].max() <= ncols
     74assert index_array_2D[:,0].min() >= 0 and index_array_2D[:,0].max() <= ncols # These test that the boundary points lie within the data area
    7275assert index_array_2D[:,1].min() >= 0 and index_array_2D[:,1].max() <= nrows
    7376
     
    135138    s[i] = stage[i,:]
    136139    xm[i] = xmomentum[i,:]
    137     ym[i] = ymomentum[i,:]
    138  
     140    ym[i] = ymomentum[i,:]
     141
     142#---------------------------------------------------------------------------
     143# Get timeseries values for wave height and components of momentum
     144#---------------------------------------------------------------------------
     145
     146basename = 'sts_gauge_tide'
     147for i in xrange(number_of_points):
     148    out_file = os.path.join(event_folder,
     149                            basename+'_'+str(i)+'.csv')
     150    fid_sts = open(out_file, 'w')
     151    fid_sts.write('time, stage, xmomentum, ymomentum, elevation \n')
     152    for j in xrange(number_of_timesteps):
     153        fid_sts.write('%.6f, %.6f, %.6f, %.6f, %.6f\n'
     154                    % (time[j], stage[j,i], xmomentum[j,i], ymomentum[j,i], elevation[i]))
     155
     156    fid_sts.close()
     157
    139158origin = Geo_reference(refzone, x_origin, y_origin)
    140159geo_ref = write_NetCDF_georeference(origin, fid)
    141160
    142161fid.variables['x'][:] = x
    143 fid.variables['y'][:] = y
     162fid.variables['y'][:] = y  
    144163
    145164fid.close()
     165
     166sys.exit()
     167
     168print 'get timeseries'
     169#-------------------------------------------------------------------------------
     170# Get gauges (timeseries of index points)
     171#-------------------------------------------------------------------------------
     172
     173basename = 'sts_gauge'
     174quantity_names = ['stage', 'xmomentum', 'ymomentum']
     175quantities = {}
     176for i, name in enumerate(quantity_names):
     177    quantities[name] = fid.variables[name][:]
     178
     179
     180for j in range(len(x)):
     181    index = j # permutation[j]
     182    stage = quantities['stage'][:,j]
     183    #print 'hello', j, stage
     184    xmomentum = quantities['xmomentum'][:,j]
     185    ymomentum = quantities['ymomentum'][:,j]
     186
     187    out_file = os.path.join(event_folder,
     188                            basename+'_'+str(index)+'.csv')
     189    fid_sts = open(out_file, 'w')
     190    fid_sts.write('time, stage, xmomentum, ymomentum \n')
     191
     192    #-----------------------------------------------------------------------
     193    # End of the get gauges
     194    #-----------------------------------------------------------------------
     195    for k in range(len(time)-1):
     196        fid_sts.write('%.6f, %.6f, %.6f, %.6f\n'
     197                      % (time[k], stage[k], xmomentum[k], ymomentum[k]))
     198
     199   
  • branches/anuga_1_1/anuga_work/production/bunbury_storm_surge_2009/process_gems_grids.py

    r7723 r7794  
    2222state = 'western_australia'
    2323scenario_folder = 'bunbury_storm_surge_scenario_2009'
    24 event = 'gcom_60min'
     24event = '20100527_gcom_12min'
    2525
    2626print "Processing event: ", event
     
    4545grid_file_names.sort()
    4646
    47 grid_file_names_temp = []
    48 for i in range(1, 32, 1):
    49     grid_file_names_temp.append(grid_file_names[i])
    50 
    51 grid_file_names = grid_file_names_temp
     47#grid_file_names_temp = []
     48# this was to subsample the gz files
     49#for i in range(1, 32, 1):
     50#    grid_file_names_temp.append(grid_file_names[i])
     51
     52#grid_file_names = grid_file_names_temp
    5253   
    5354number_of_timesteps = len(grid_file_names)
     
    5657
    5758start_time = 0      # all times in seconds
    58 timestep = 3600 #1440 #720     # all times in seconds
     59timestep = 720     # all times in seconds
    5960end_time =  start_time + (timestep*number_of_timesteps) # all times in seconds
    6061refzone = 50        # UTM zone of model
     
    9394
    9495#------------------------------------------------------
    95 # Create arrays of elevation, stage, speed and theta
     96# Create arrays of elevation, depth, current_x and current_y
    9697#-------------------------------------------------------
    97 print 'creating numpy arrays: elevation, stage, speed, theta'
    98 
    99 stage = numpy.empty(number_of_timesteps*nrows*ncols, dtype=float)
    100 speed = numpy.empty(number_of_timesteps*nrows*ncols, dtype=float)
    101 theta = numpy.empty(number_of_timesteps*nrows*ncols, dtype=float)
     98print 'creating numpy arrays: elevation, depth, current_x, current_y'
     99
     100depth = numpy.empty(number_of_timesteps*nrows*ncols, dtype=float)
     101current_x = numpy.empty(number_of_timesteps*nrows*ncols, dtype=float)
     102current_y = numpy.empty(number_of_timesteps*nrows*ncols, dtype=float)
    102103
    103104for j, f in enumerate(grid_file_names):
     
    106107    gz_file = gzip.open(f, 'rb')
    107108
    108     st = []
    109     sp = []
    110     t = []
     109    d = []
     110    cx = []
     111    cy = []
    111112
    112113    for i, L in enumerate(gz_file):
    113114       
    114         if i > 7 and i < (8 + nrows):   #  7 refers to the number of rows of header info that we skip - always CHECK format
    115             st += L.strip().split()
     115        #if i > 7 and i < (8 + nrows): #  7 refers to the number of rows of header info that we skip - always CHECK format
     116        #    print 'first block'
     117
     118        #if i > (9 + nrows) and i < (10 + 2*nrows):
     119        #    print 'second block'
     120
     121        #if i > (11 + 2*nrows):
     122        #    print 'third block'
     123
     124        if i > 13 + 3*nrows and i < (14 + 4*nrows):   
     125            d += L.strip().split()
    116126           
    117         if i > (9 + nrows) and i < (10 + 2*nrows):
    118             sp += L.strip().split()
     127        if i > (15 + 4*nrows) and i < (16 + 5*nrows):
     128            cx += L.strip().split()
    119129           
    120         if i > (11 + 2*nrows):
    121             t += L.strip().split()
    122 
    123     stage[j*nrows*ncols : (j+1)*nrows*ncols] = numpy.flipud(numpy.array(st).astype('d'))
    124     speed[j*nrows*ncols : (j+1)*nrows*ncols] = numpy.flipud(numpy.array(sp).astype('d'))
    125     theta[j*nrows*ncols : (j+1)*nrows*ncols] = numpy.flipud(numpy.array(t).astype('d'))
     130        if i > (17 + 5*nrows):
     131            cy += L.strip().split()
     132
     133    depth[j*nrows*ncols : (j+1)*nrows*ncols] = numpy.flipud(numpy.array(d).astype('d'))
     134    current_x[j*nrows*ncols : (j+1)*nrows*ncols] = numpy.flipud(numpy.array(cx).astype('d'))
     135    current_y[j*nrows*ncols : (j+1)*nrows*ncols] = numpy.flipud(numpy.array(cy).astype('d'))
    126136
    127137    gz_file.close()
     
    130140
    131141print 'Size of elevation array: ', elevation.size
    132 print 'Size of stage array: ', stage.size
    133 print 'Size of speed array: ', speed.size
    134 print 'Size of theta array: ', theta.size
    135 
    136 assert stage.size == speed.size == theta.size == ncols * nrows * number_of_timesteps
    137 assert stage.size == number_of_timesteps * elevation.size
    138 
    139 depth = numpy.empty(stage.size, dtype='d')
     142print 'Size of depth array: ', depth.size
     143print 'Size of current x array: ', current_x.size
     144print 'Size of current y array: ', current_y.size
     145
     146assert depth.size == current_x.size == current_y.size == ncols * nrows * number_of_timesteps
     147assert depth.size == number_of_timesteps * elevation.size
     148
     149stage = numpy.empty(depth.size, dtype='d')
    140150number_of_points = ncols * nrows
    141151
    142152# ---------------------------------------------
    143 # Create mask of no_data values across stage, speed and theta to ensure all three quantities
     153# Create mask of no_data values across depth, current_x and current_y to ensure all three quantities
    144154# have no_data values in corresponding cells - this is a work-around until GEMS can produce
    145155# a consistent dataset
    146156# ---------------------------------------------
    147 import pdb
    148 pdb.set_trace()
    149 no_value_index = numpy.where(((stage < -100) + (speed < 0) + (theta < 0)) == True)[0]
    150 depth = stage - numpy.tile(elevation, number_of_timesteps)
     157
     158no_value_index = numpy.where(((depth < -9000) + (current_x < -9000) + (current_y < -9000)) == True)[0]
    151159
    152160numpy.put(stage, no_value_index, -9999)
    153 numpy.put(speed, no_value_index, -9999)
    154 numpy.put(theta, no_value_index, -9999)
     161numpy.put(current_x, no_value_index, -9999)
     162numpy.put(current_y, no_value_index, -9999)
    155163numpy.put(depth, no_value_index, 0)
    156164   
    157 # Taking absolute value is to account for -ve depths obtained when stage-elevation
     165# Taking absolute value is to account for -ve depths obtained when depth-elevation
    158166# is slightly -ve  - why I don't know, possibly a rounding error?
    159 momentum = numpy.absolute(depth * speed)
    160 
    161 print 'Calculating xmomentum and ymomentum'
    162 
    163 xmomentum = momentum*numpy.sin(numpy.radians(theta))
    164 ymomentum = momentum*numpy.cos(numpy.radians(theta))
     167#momentum = numpy.absolute(depth * current_x)
     168
     169print 'Calculating stage, xmomentum and ymomentum'
     170
     171#stage = depth - numpy.tile(numpy.absolute(elevation), number_of_timesteps)
     172stage = depth + numpy.tile(elevation, number_of_timesteps)
     173xmomentum = current_x*depth #momentum*numpy.sin(numpy.radians(current_y))
     174ymomentum = current_y*depth #momentum*numpy.cos(numpy.radians(current_y))
    165175
    166176numpy.put(xmomentum, no_value_index, 0)
    167177numpy.put(ymomentum, no_value_index, 0)
    168178
    169 #    if t > 0.0 and t < 90.0:
    170 #        assert mx >= 0 and my >= 0
    171        
    172 #    elif t > 90.0 and t < 180.0:
    173 #        assert mx >= 0 and my <= 0
    174        
    175 #    elif t > 180.0 and t < 270.0:
    176 #        assert mx <= 0  and my <= 0
    177        
    178 #    elif t > 270.0 and t < 360.0:
    179 #        assert mx <= 0 and my >= 0
    180        
    181 #    elif t == 0.0 or t == 360.0:
    182 #        assert my >= 0
    183        
    184 #    elif t == 90.0:
    185 #        assert mx >= 0
    186        
    187 #    elif t == 180.0:
    188 #        assert my <= 0
    189        
    190 #    elif t == 270.0:
    191 #        assert mx <= 0
    192        
    193 #    elif t == -9999:
    194 #        mx = 0
    195 #        my = 0
    196 #    else:
    197 #        print "Unexpected value of theta"
    198 #        exit()
    199        
    200 #    xmomentum[i] = mx
    201 #    ymomentum[i] = my
    202 
    203 assert len(numpy.where((numpy.sqrt(xmomentum**2 + ymomentum**2) - momentum) > 1.e-06)[0]) == 0
     179#assert len(numpy.where((numpy.sqrt(xmomentum**2 + ymomentum**2) - momentum) > 1.e-06)[0]) == 0
    204180   
    205181x_origin_int = int(10000*x_origin)
     
    215191
    216192assert time.size == number_of_timesteps
    217 assert momentum.size == stage.size
     193assert xmomentum.size == depth.size == ymomentum.size
    218194
    219195# -----------------------------
Note: See TracChangeset for help on using the changeset viewer.