source: trunk/anuga_core/source/anuga/shallow_water/data_manager.py @ 7772

Last change on this file since 7772 was 7772, checked in by hudson, 14 years ago

Fixed a bunch of failing tests - only 3 still failing.

File size: 26.6 KB
[2852]1"""datamanager.py - input output for AnuGA
2
3
4This module takes care of reading and writing datafiles such as topograhies,
5model output, etc
6
7
8Formats used within AnuGA:
9
10.sww: Netcdf format for storing model output f(t,x,y)
11.tms: Netcdf format for storing time series f(t)
12
[4663]13.csv: ASCII format for storing arbitrary points and associated attributes
[2852]14.pts: NetCDF format for storing arbitrary points and associated attributes
15
16.asc: ASCII format of regular DEMs as output from ArcView
17.prj: Associated ArcView file giving more meta data for asc format
18.ers: ERMapper header format of regular DEMs for ArcView
19
20.dem: NetCDF representation of regular DEM data
21
22.tsh: ASCII format for storing meshes and associated boundary and region info
23.msh: NetCDF format for storing meshes and associated boundary and region info
24
25.nc: Native ferret NetCDF format
26.geo: Houdinis ascii geometry format (?)
27
28
29A typical dataflow can be described as follows
30
31Manually created files:
32ASC, PRJ:     Digital elevation models (gridded)
[3535]33TSH:          Triangular meshes (e.g. created from anuga.pmesh)
[2852]34NC            Model outputs for use as boundary conditions (e.g from MOST)
35
36
37AUTOMATICALLY CREATED FILES:
38
39ASC, PRJ  ->  DEM  ->  PTS: Conversion of DEM's to native pts file
40
41NC -> SWW: Conversion of MOST bundary files to boundary sww
42
43PTS + TSH -> TSH with elevation: Least squares fit
44
45TSH -> SWW:  Conversion of TSH to sww viewable using Swollen
46
[3560]47TSH + Boundary SWW -> SWW: Simluation using abstract_2d_finite_volumes
[2852]48
49"""

# This file was reverted from changeset:5484 to changeset:5470 on 10th July
# by Ole.

import os, sys
import csv
import string
import shutil
from struct import unpack
import array as p_array
from os import sep, path, remove, mkdir, access, F_OK, W_OK, getcwd

import numpy as num

from Scientific.IO.NetCDF import NetCDFFile
from os.path import exists, basename, join

from anuga.coordinate_transforms.redfearn import redfearn, \
     convert_from_latlon_to_utm
from anuga.coordinate_transforms.geo_reference import Geo_reference, \
     write_NetCDF_georeference, ensure_geo_reference
from anuga.geospatial_data.geospatial_data import Geospatial_data,\
     ensure_absolute
from anuga.config import minimum_storable_height as \
     default_minimum_storable_height
from anuga.config import netcdf_mode_r, netcdf_mode_w, netcdf_mode_a
from anuga.config import netcdf_float, netcdf_float32, netcdf_int
from anuga.config import max_float
from anuga.utilities.numerical_tools import ensure_numeric, mean
from anuga.caching.caching import myhash
from anuga.shallow_water.shallow_water_domain import Domain
from anuga.abstract_2d_finite_volumes.pmesh2domain import \
     pmesh_to_domain_instance
from anuga.abstract_2d_finite_volumes.util import get_revision_number, \
     remove_lone_verts, sww2timeseries, get_centroid_values

from anuga.abstract_2d_finite_volumes.neighbour_mesh import segment_midpoints
from anuga.load_mesh.loadASCII import export_mesh_file
from anuga.geometry.polygon import intersection, inside_polygon
from anuga.file_conversion.sww2dem import sww2dem

from anuga.utilities.system_tools import get_vars_in_expression
import anuga.utilities.log as log

from anuga.utilities.file_utils import create_filename,\
                        get_all_swwfiles
from anuga.file.csv_file import load_csv_as_dict
from sww_file import Read_sww, Write_sww

from anuga.anuga_exceptions import DataMissingValuesError, \
                DataFileNotOpenError, DataTimeError, DataDomainError, \
                NewQuantity


def csv2building_polygons(file_name,
                          floor_height=3,
                          clipping_polygons=None):
    """
    Convert CSV files of the form:

    easting,northing,id,floors
    422664.22,870785.46,2,0
    422672.48,870780.14,2,0
    422668.17,870772.62,2,0
    422660.35,870777.17,2,0
    422664.22,870785.46,2,0
    422661.30,871215.06,3,1
    422667.50,871215.70,3,1
    422668.30,871204.86,3,1
    422662.21,871204.33,3,1
    422661.30,871215.06,3,1

    to a dictionary of polygons with id as key.
    The associated number of floors is converted to m above MSL and
    returned as a separate dictionary, also keyed by id.

    Optional parameter floor_height is the height of each building story.
    Optional parameter clipping_polygons is a list of polygons selecting
    buildings. Any building not in these polygons will be omitted.

    See csv2polygons for more details.
    """

    polygons, values = csv2polygons(file_name,
                                    value_name='floors',
                                    clipping_polygons=clipping_polygons)

    heights = {}
    for key in values.keys():
        v = float(values[key])
        heights[key] = v * floor_height

    return polygons, heights


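# A minimal usage sketch for csv2building_polygons. The file name and
# clipping polygon below are hypothetical; any CSV laid out as in the
# docstring above will do.
def _example_csv2building_polygons():
    # Square clipping region covering the sample eastings/northings
    clip = [[422000.0, 870000.0], [423000.0, 870000.0],
            [423000.0, 872000.0], [422000.0, 872000.0]]

    # With floor_height=3, the 1-floor building with id 3 gets height 3 m
    polygons, heights = csv2building_polygons('buildings.csv',
                                              floor_height=3,
                                              clipping_polygons=[clip])

    for id in polygons:
        vertices = polygons[id]   # list of [easting, northing] pairs
        height = heights[id]      # building height in m

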
##
# @brief Convert CSV file into a dictionary of polygons and associated values.
# @param file_name The path to the file to read.
# @param value_name Name of the 4th column (e.g. 'roughness' or 'floors').
# @param clipping_polygons Optional list of polygons to clip against.
def csv2polygons(file_name,
                 value_name='value',
                 clipping_polygons=None):
    """
    Convert CSV files of the form:

    easting,northing,id,value
    422664.22,870785.46,2,0
    422672.48,870780.14,2,0
    422668.17,870772.62,2,0
    422660.35,870777.17,2,0
    422664.22,870785.46,2,0
    422661.30,871215.06,3,1
    422667.50,871215.70,3,1
    422668.30,871204.86,3,1
    422662.21,871204.33,3,1
    422661.30,871215.06,3,1

    to a dictionary of polygons with id as key.
    The associated values are returned as a separate dictionary also keyed
    by id.


    easting: x coordinate relative to zone implied by the model
    northing: y coordinate relative to zone implied by the model
    id: tag for polygon comprising points with this tag
    value: numerical value associated with each polygon. This must be the
           same for all points in each polygon.

    The last header, value, can take on other names such as roughness,
    floors, etc - or it can be omitted in which case the returned values
    will be None.

    Eastings and northings will be returned as floating point values while
    id and values will be returned as strings.

    Optional argument: clipping_polygons will select only those polygons
    that are fully within one or more of the clipping_polygons. In other
    words any polygon from the csv file which has at least one point not
    inside one of the clipping polygons will be excluded.

    See underlying function load_csv_as_dict for more details.
    """

    X, _ = load_csv_as_dict(file_name)

    msg = 'Polygon csv file must have 3 or 4 columns'
    assert len(X.keys()) in [3, 4], msg

    msg = 'Did not find expected column header: easting'
    assert 'easting' in X.keys(), msg

    msg = 'Did not find expected column header: northing'
    assert 'northing' in X.keys(), msg

    msg = 'Did not find expected column header: id'
    assert 'id' in X.keys(), msg

    if value_name is not None:
        msg = 'Did not find expected column header: %s' % value_name
        assert value_name in X.keys(), msg

    polygons = {}
    if len(X.keys()) == 4:
        values = {}
    else:
        values = None

    # Loop through entries and compose polygons
    excluded_polygons = {}
    past_ids = {}
    last_id = None
    for i, id in enumerate(X['id']):

        # Check for duplicate polygons
        if id in past_ids:
            msg = 'Polygon %s was duplicated in line %d' % (id, i)
            raise Exception(msg)

        if id not in polygons:
            # Start new polygon
            polygons[id] = []
            if values is not None:
                values[id] = X[value_name][i]

            # Keep track of previous polygon ids
            if last_id is not None:
                past_ids[last_id] = i

        # Append this point to current polygon
        point = [float(X['easting'][i]), float(X['northing'][i])]

        if clipping_polygons is not None:
            exclude = True
            for clipping_polygon in clipping_polygons:
                if inside_polygon(point, clipping_polygon):
                    exclude = False
                    break

            if exclude is True:
                excluded_polygons[id] = True

        polygons[id].append(point)

        # Check that value is the same across each polygon
        if values is not None:
            msg = 'Values must be the same across each polygon. '
            msg += 'I got %s in line %d but it should have been %s' \
                   % (X[value_name][i], i, values[id])
            assert values[id] == X[value_name][i], msg

        last_id = id

    # Weed out polygons that were not wholly inside clipping polygons
    for id in excluded_polygons:
        del polygons[id]

    return polygons, values


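# A hedged sketch showing csv2polygons on data laid out as in the
# docstring. 'mannings.csv' is a hypothetical file whose fourth column
# is named 'roughness'.
def _example_csv2polygons():
    polygons, values = csv2polygons('mannings.csv',
                                    value_name='roughness')

    # polygons maps id -> list of [easting, northing] points (floats);
    # values maps id -> the roughness string shared by all points of
    # that polygon (or None if the file only had 3 columns).
    for id, polygon in polygons.items():
        roughness = float(values[id])

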
##
# @brief Filter data file, selecting timesteps first:step:last.
# @param filename1 Data file to filter.
# @param filename2 File to write filtered timesteps to.
# @param first First timestep.
# @param last Last timestep.
# @param step Timestep stride.
def filter_netcdf(filename1, filename2, first=0, last=None, step=1):
    """Filter data file, selecting timesteps first:step:last.

    Read netcdf filename1, pick timesteps first:step:last and save to
    netcdf file filename2
    """

    # Get NetCDF
    infile = NetCDFFile(filename1, netcdf_mode_r)   # Open existing file for read
    outfile = NetCDFFile(filename2, netcdf_mode_w)  # Open new file

    # Copy dimensions
    for d in infile.dimensions:
        outfile.createDimension(d, infile.dimensions[d])

    # Copy variable definitions
    for name in infile.variables:
        var = infile.variables[name]
        outfile.createVariable(name, var.dtype.char, var.dimensions)

    # Copy the static variables
    for name in infile.variables:
        if name == 'time' or name == 'stage':
            pass
        else:
            outfile.variables[name][:] = infile.variables[name][:]

    # Copy selected timesteps
    time = infile.variables['time']
    stage = infile.variables['stage']

    newtime = outfile.variables['time']
    newstage = outfile.variables['stage']

    if last is None:
        last = len(time)

    selection = range(first, last, step)
    for i, j in enumerate(selection):
        log.critical('Copying timestep %d of %d (%f)'
                     % (j, last-first, time[j]))
        newtime[i] = time[j]
        newstage[i,:] = stage[j,:]

    # Close
    infile.close()
    outfile.close()


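# Example use of filter_netcdf (file names are hypothetical). This keeps
# every second timestep from the first 100 timesteps of 'full.sww'.
def _example_filter_netcdf():
    filter_netcdf('full.sww', 'thinned.sww', first=0, last=100, step=2)

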
##
# @brief Wrapper for sww2dem; export quantities from sww file(s) to grid(s).
# @param basename_in Stem of the input sww filename(s).
# @param extra_name_out Extra suffix for output file names.
# @param quantities List of quantities to export (defaults to ['elevation']).
# @param reduction Reduction to apply across time (e.g. max) or timestep index.
# @param cellsize Grid cell size.
# @param number_of_decimal_places Precision of output values.
# @param NODATA_value Value to use where there is no data.
# @param easting_min
# @param easting_max
# @param northing_min
# @param northing_max
# @param verbose True if this function is to be verbose.
# @param origin
# @param datum
# @param format Output format, e.g. 'ers' or 'asc'.
# @return The basenames of the files produced.
def export_grid(basename_in, extra_name_out=None,
                quantities=None, # defaults to elevation
                reduction=None,
                cellsize=10,
                number_of_decimal_places=None,
                NODATA_value=-9999,
                easting_min=None,
                easting_max=None,
                northing_min=None,
                northing_max=None,
                verbose=False,
                origin=None,
                datum='WGS84',
                format='ers'):
    """Wrapper for sww2dem.
    See sww2dem to find out what most of the parameters do.

    quantities is a list of quantities. Each quantity will be
    calculated for each sww file.

    This function returns the basenames of the files produced: the
    directory plus the file name, without the extension.

    It will produce as many output files as there are input sww files.
    """

    if quantities is None:
        quantities = ['elevation']

    if type(quantities) is str:
        quantities = [quantities]

    # How many sww files are there?
    dir, base = os.path.split(basename_in)

    iterate_over = get_all_swwfiles(dir, base, verbose)

    if dir == "":
        dir = "."    # Unix compatibility

    files_out = []
    for sww_file in iterate_over:
        for quantity in quantities:
            if extra_name_out is None:
                basename_out = sww_file + '_' + quantity
            else:
                basename_out = sww_file + '_' + quantity + '_' + extra_name_out

            file_out = sww2dem(dir+sep+sww_file, dir+sep+basename_out,
                               quantity,
                               reduction,
                               cellsize,
                               number_of_decimal_places,
                               NODATA_value,
                               easting_min,
                               easting_max,
                               northing_min,
                               northing_max,
                               verbose,
                               origin,
                               datum,
                               format)
            files_out.append(file_out)
    return files_out


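# Sketch of a typical export_grid call (file names are hypothetical).
# For an sww file 'runup.sww' this writes one ERS grid per quantity,
# e.g. 'runup_elevation_10m.ers', and returns the basenames produced.
def _example_export_grid():
    files = export_grid('runup', extra_name_out='10m',
                        quantities=['elevation', 'stage'],
                        reduction=max,      # maximum over all timesteps
                        cellsize=10,
                        verbose=True)

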
##
# @brief Read DEM file, decimate data, write new DEM file.
# @param basename_in Stem of the input filename.
# @param stencil Weight array used to compute each decimated value.
# @param cellsize_new New cell size to resample on.
# @param basename_out Stem of the output filename.
# @param verbose True if this function is to be verbose.
def decimate_dem(basename_in, stencil, cellsize_new, basename_out=None,
                 verbose=False):
    """Read Digital Elevation Model from the following NetCDF format (.dem)

    Example:

    ncols         3121
    nrows         1800
    xllcorner     722000
    yllcorner     5893000
    cellsize      25
    NODATA_value  -9999
    138.3698 137.4194 136.5062 135.5558 ..........

    Decimate data to cellsize_new using stencil and write to NetCDF dem format.
    """

    root = basename_in
    inname = root + '.dem'

    # Open existing netcdf file to read
    infile = NetCDFFile(inname, netcdf_mode_r)

    if verbose: log.critical('Reading DEM from %s' % inname)

    # Read metadata (convert from numpy.int32 to int where appropriate)
    ncols = int(infile.ncols[0])
    nrows = int(infile.nrows[0])
    xllcorner = infile.xllcorner[0]
    yllcorner = infile.yllcorner[0]
    cellsize = int(infile.cellsize[0])
    NODATA_value = int(infile.NODATA_value[0])
    zone = int(infile.zone[0])
    false_easting = infile.false_easting[0]
    false_northing = infile.false_northing[0]
    projection = infile.projection
    datum = infile.datum
    units = infile.units

    dem_elevation = infile.variables['elevation']

    # Get output file name
    if basename_out is None:
        outname = root + '_' + repr(cellsize_new) + '.dem'
    else:
        outname = basename_out + '.dem'

    if verbose: log.critical('Write decimated NetCDF file to %s' % outname)

    # Determine some dimensions for decimated grid
    (nrows_stencil, ncols_stencil) = stencil.shape
    x_offset = ncols_stencil / 2
    y_offset = nrows_stencil / 2
    cellsize_ratio = int(cellsize_new / cellsize)
    ncols_new = 1 + (ncols - ncols_stencil) / cellsize_ratio
    nrows_new = 1 + (nrows - nrows_stencil) / cellsize_ratio

    # Open netcdf file for output
    outfile = NetCDFFile(outname, netcdf_mode_w)

    # Create new file
    outfile.institution = 'Geoscience Australia'
    outfile.description = 'NetCDF DEM format for compact and portable ' \
                          'storage of spatial point data'

    # Georeferencing
    outfile.zone = zone
    outfile.projection = projection
    outfile.datum = datum
    outfile.units = units

    outfile.cellsize = cellsize_new
    outfile.NODATA_value = NODATA_value
    outfile.false_easting = false_easting
    outfile.false_northing = false_northing

    outfile.xllcorner = xllcorner + (x_offset * cellsize)
    outfile.yllcorner = yllcorner + (y_offset * cellsize)
    outfile.ncols = ncols_new
    outfile.nrows = nrows_new

    # dimension definition
    outfile.createDimension('number_of_points', nrows_new*ncols_new)

    # variable definition
    outfile.createVariable('elevation', netcdf_float, ('number_of_points',))

    # Get handle to the variable
    elevation = outfile.variables['elevation']

    dem_elevation_r = num.reshape(dem_elevation, (nrows, ncols))

    # Store data
    global_index = 0
    for i in range(nrows_new):
        if verbose: log.critical('Processing row %d of %d' % (i, nrows_new))

        lower_index = global_index
        telev = num.zeros(ncols_new, num.float)
        local_index = 0
        trow = i * cellsize_ratio

        for j in range(ncols_new):
            tcol = j * cellsize_ratio
            tmp = dem_elevation_r[trow:trow+nrows_stencil,
                                  tcol:tcol+ncols_stencil]

            # If dem contains 1 or more NODATA_values set value in
            # decimated dem to NODATA_value, else compute decimated
            # value using stencil
            if num.sum(num.sum(num.equal(tmp, NODATA_value))) > 0:
                telev[local_index] = NODATA_value
            else:
                telev[local_index] = num.sum(num.sum(tmp * stencil))

            global_index += 1
            local_index += 1

        upper_index = global_index

        elevation[lower_index:upper_index] = telev

    assert global_index == nrows_new*ncols_new, \
           'index not equal to number of points'

    infile.close()
    outfile.close()


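# Sketch: decimate a 25 m DEM to 100 m using a 4x4 averaging stencil
# (cellsize_ratio = 100/25 = 4, assuming 'topo.dem' has cellsize 25).
# 'topo' is a hypothetical basename, so 'topo.dem' is read and
# 'topo_100.dem' is written.
def _example_decimate_dem():
    stencil = num.ones((4, 4), num.float) / 16.0    # simple mean filter
    decimate_dem('topo', stencil, cellsize_new=100)

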
####  URS 2 SWW  ###

# Definitions of various NetCDF dimension names, etc.
lon_name = 'LON'
lat_name = 'LAT'
time_name = 'TIME'
precision = netcdf_float  # So if we want to change the precision it's done here

##
# @brief Class for a NetCDF data file writer.
class Write_nc:
    """Write an nc file.

    Note, this should be checked to meet cdc netcdf conventions for gridded
    data. http://www.cdc.noaa.gov/cdc/conventions/cdc_netcdf_standard.shtml
    """

    ##
    # @brief Instantiate a Write_nc instance.
    # @param quantity_name Must be 'HA', 'UA' or 'VA'.
    # @param file_name Path of the nc file to write.
    # @param time_step_count The number of time steps.
    # @param time_step The time_step size.
    # @param lon A list/array of the longitudes.
    # @param lat A list/array of the latitudes.
    def __init__(self,
                 quantity_name,
                 file_name,
                 time_step_count,
                 time_step,
                 lon,
                 lat):
        """Instantiate a Write_nc instance (NetCDF file writer).

        time_step_count is the number of time steps.
        time_step is the time step size

        pre-condition: quantity_name must be 'HA', 'UA' or 'VA'.
        """

        self.quantity_name = quantity_name
        quantity_units = {'HA':'CENTIMETERS',
                          'UA':'CENTIMETERS/SECOND',
                          'VA':'CENTIMETERS/SECOND'}

        multiplier_dic = {'HA':100.0,   # To convert from m to cm
                          'UA':100.0,   #             and m/s to cm/sec
                          'VA':-100.0}  # MUX files have positive x in the
                                        # Southern direction.  This corrects
                                        # for it, when writing nc files.

        self.quantity_multiplier = multiplier_dic[self.quantity_name]

        #self.file_name = file_name
        self.time_step_count = time_step_count
        self.time_step = time_step

        # NetCDF file definition
        self.outfile = NetCDFFile(file_name, netcdf_mode_w)
        outfile = self.outfile

        # Create new file
        nc_lon_lat_header(outfile, lon, lat)

        # TIME
        outfile.createDimension(time_name, None)
        outfile.createVariable(time_name, precision, (time_name,))

        # QUANTITY
        outfile.createVariable(self.quantity_name, precision,
                               (time_name, lat_name, lon_name))
        outfile.variables[self.quantity_name].missing_value = -1.e+034
        outfile.variables[self.quantity_name].units = \
                                 quantity_units[self.quantity_name]
        outfile.variables[lon_name][:] = ensure_numeric(lon)
        outfile.variables[lat_name][:] = ensure_numeric(lat)

        # Assume no one will be wanting to read this, while we are writing
        #outfile.close()

    ##
    # @brief Write a time-step of quantity data.
    # @param quantity_slice The data to be stored for this time-step.
    def store_timestep(self, quantity_slice):
        """Write a time slice of quantity info

        quantity_slice is the data to be stored at this time step
        """

        # Get the variables
        time = self.outfile.variables[time_name]
        quantity = self.outfile.variables[self.quantity_name]

        # Get index of the time slice to write
        i = len(time)

        # Store time and the quantity slice (scaled to output units)
        time[i] = i * self.time_step    #self.domain.time
        quantity[i,:] = quantity_slice * self.quantity_multiplier

    ##
    # @brief Close file underlying the class instance.
    def close(self):
        self.outfile.close()


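# Usage sketch for Write_nc: write two timesteps of wave amplitude
# ('HA', stored in cm) on a small hypothetical 2x3 lat/lon grid.
def _example_write_nc():
    lon = [150.0, 150.5, 151.0]
    lat = [-35.0, -34.5]

    ha = Write_nc('HA', 'ha.nc',
                  time_step_count=2, time_step=60.0,
                  lon=lon, lat=lat)

    for t in range(2):
        # One slice of amplitudes in metres; Write_nc scales to cm
        amplitudes = num.zeros((len(lat), len(lon)), num.float)
        ha.store_timestep(amplitudes)

    ha.close()

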
##
# @brief Write an NC elevation file.
# @param file_out Path to the output file.
# @param lon A list/array of the longitudes.
# @param lat A list/array of the latitudes.
# @param depth_vector The elevation data to write.
def write_elevation_nc(file_out, lon, lat, depth_vector):
    """Write an nc elevation file."""

    # NetCDF file definition
    outfile = NetCDFFile(file_out, netcdf_mode_w)

    # Create new file
    nc_lon_lat_header(outfile, lon, lat)

    # ELEVATION
    zname = 'ELEVATION'
    outfile.createVariable(zname, precision, (lat_name, lon_name))
    outfile.variables[zname].units = 'CENTIMETERS'
    outfile.variables[zname].missing_value = -1.e+034

    outfile.variables[lon_name][:] = ensure_numeric(lon)
    outfile.variables[lat_name][:] = ensure_numeric(lat)

    depth = num.reshape(depth_vector, (len(lat), len(lon)))
    outfile.variables[zname][:] = depth

    outfile.close()

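# A hedged sketch for write_elevation_nc on a small hypothetical grid.
# The depth vector is flattened with latitude varying slowest, since it
# is reshaped to (len(lat), len(lon)) above.
def _example_write_elevation_nc():
    lon = [150.0, 150.5, 151.0]
    lat = [-35.0, -34.5]
    depths = num.zeros(len(lat) * len(lon), num.float)    # depths in cm
    write_elevation_nc('elevation.nc', lon, lat, depths)
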
##
# @brief Write lat/lon headers to a NetCDF file.
# @param outfile Handle to open file to write to.
# @param lon An iterable of the longitudes.
# @param lat An iterable of the latitudes.
# @note Defines lat/long dimensions and variables. Sets various attributes:
#          .point_spacing  and  .units
#       and writes lat/lon data.
def nc_lon_lat_header(outfile, lon, lat):
    """Write lat/lon headers to a NetCDF file.

    outfile is the netcdf file handle.
    lon - a list/array of the longitudes
    lat - a list/array of the latitudes
    """

    outfile.institution = 'Geoscience Australia'
    outfile.description = 'Converted from URS binary C'

    # Longitude
    outfile.createDimension(lon_name, len(lon))
    outfile.createVariable(lon_name, precision, (lon_name,))
    outfile.variables[lon_name].point_spacing = 'uneven'
    outfile.variables[lon_name].units = 'degrees_east'
    outfile.variables[lon_name].assignValue(lon)

    # Latitude
    outfile.createDimension(lat_name, len(lat))
    outfile.createVariable(lat_name, precision, (lat_name,))
    outfile.variables[lat_name].point_spacing = 'uneven'
    outfile.variables[lat_name].units = 'degrees_north'
    outfile.variables[lat_name].assignValue(lat)


##
# @brief Get longitudes, latitudes and quantity from a gridded point list.
# @param long_lat_dep Array where each row is (longitude, latitude, quantity).
# @return A tuple (long, lat, quantity).
# @note The latitude is the fastest varying dimension - in mux files.
def lon_lat2grid(long_lat_dep):
    """
    Given a list of points that are assumed to be on a grid,
    return the longs and lats of the grid.
    long_lat_dep is an array where each row is a position.
    The first column is longitudes.
    The second column is latitudes.

    The latitude is the fastest varying dimension - in mux files
    """

    LONG = 0
    LAT = 1
    QUANTITY = 2

    long_lat_dep = ensure_numeric(long_lat_dep, num.float)

    num_points = long_lat_dep.shape[0]
    this_rows_long = long_lat_dep[0,LONG]

    # Count the length of unique latitudes
    i = 0
    while i < num_points and long_lat_dep[i,LONG] == this_rows_long:
        i += 1

    # Determine the lats and longs from the grid
    lat = long_lat_dep[:i, LAT]
    long = long_lat_dep[::i, LONG]

    lenlong = len(long)
    lenlat = len(lat)

    msg = 'Input data is not gridded'
    assert num_points % lenlat == 0, msg
    assert num_points % lenlong == 0, msg

    # Test that data is gridded
    for i in range(lenlong):
        msg = 'Data is not gridded.  It must be for this operation'
        first = i * lenlat
        last = first + lenlat

        assert num.allclose(long_lat_dep[first:last,LAT], lat), msg
        assert num.allclose(long_lat_dep[first:last,LONG], long[i]), msg

    msg = 'Out of range latitudes/longitudes'
    for l in lat:
        assert -90 < l < 90, msg
    for l in long:
        assert -180 < l < 180, msg

    # Changing quantity from lat being the fastest varying dimension to
    # long being the fastest varying dimension
    # FIXME - make this faster/do this a better way
    # use numeric transpose, after reshaping the quantity vector
    quantity = num.zeros(num_points, num.float)

    for lat_i, _ in enumerate(lat):
        for long_i, _ in enumerate(long):
            q_index = lat_i*lenlong + long_i
            lld_index = long_i*lenlat + lat_i
            temp = long_lat_dep[lld_index, QUANTITY]
            quantity[q_index] = temp

    return long, lat, quantity

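# A small self-contained check of lon_lat2grid: four points on a 2x2
# grid with latitude as the fastest varying dimension, as in mux files.
def _example_lon_lat2grid():
    long_lat_dep = [[0.0, 10.0, 1.0],
                    [0.0, 20.0, 2.0],
                    [1.0, 10.0, 3.0],
                    [1.0, 20.0, 4.0]]

    long, lat, quantity = lon_lat2grid(long_lat_dep)

    # long == [0.0, 1.0], lat == [10.0, 20.0] and quantity is reordered
    # to [1.0, 3.0, 2.0, 4.0] so that longitude varies fastest.
    assert num.allclose(long, [0.0, 1.0])
    assert num.allclose(lat, [10.0, 20.0])
    assert num.allclose(quantity, [1.0, 3.0, 2.0, 4.0])
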
################################################################################
# END URS 2 SWW
################################################################################


##
# @brief Convert a points file to a single polygon via an alpha shape.
# @param points_file The points file.
# @param minimum_triangle_angle Minimum angle (degrees) for mesh triangles.
# @return The boundary polygon of the meshed outline.
def points2polygon(points_file, minimum_triangle_angle=3.0):
    """
    WARNING: This function is not fully working.

    Function to return a polygon returned from alpha shape, given a points
    file.

    WARNING: Alpha shape returns multiple polygons, but this function only
             returns one polygon.
    """

    from anuga.pmesh.mesh import Mesh, importMeshFromFile
    from anuga.shallow_water import Domain
    from anuga.pmesh.mesh_interface import create_mesh_from_regions

    mesh = importMeshFromFile(points_file)
    mesh.auto_segment()
    mesh.exportASCIIsegmentoutlinefile("outline.tsh")
    mesh2 = importMeshFromFile("outline.tsh")
    mesh2.generate_mesh(maximum_triangle_area=1000000000,
                        minimum_triangle_angle=minimum_triangle_angle,
                        verbose=False)
    mesh2.export_mesh_file('outline_meshed.tsh')
    domain = Domain("outline_meshed.tsh", use_cache=False)
    polygon = domain.get_boundary_polygon()
    return polygon


################################################################################

if __name__ == "__main__":
    # Setting umask from config to force permissions for all files and
    # directories created to the same. (It was noticed that "mpirun" doesn't
    # honour the umask set in your .bashrc etc file.)

    from config import umask
    import os
    os.umask(umask)