source: anuga_core/source/anuga/shallow_water/data_manager.py @ 7012

1"""datamanager.py - input output for AnuGA
2
3
4This module takes care of reading and writing datafiles such as topograhies,
5model output, etc
6
7
8Formats used within AnuGA:
9
10.sww: Netcdf format for storing model output f(t,x,y)
11.tms: Netcdf format for storing time series f(t)
12
13.csv: ASCII format for storing arbitrary points and associated attributes
14.pts: NetCDF format for storing arbitrary points and associated attributes
15
16.asc: ASCII format of regular DEMs as output from ArcView
17.prj: Associated ArcView file giving more meta data for asc format
18.ers: ERMapper header format of regular DEMs for ArcView
19
20.dem: NetCDF representation of regular DEM data
21
22.tsh: ASCII format for storing meshes and associated boundary and region info
23.msh: NetCDF format for storing meshes and associated boundary and region info
24
25.nc: Native ferret NetCDF format
26.geo: Houdinis ascii geometry format (?)
27
28
29A typical dataflow can be described as follows
30
31Manually created files:
32ASC, PRJ:     Digital elevation models (gridded)
33TSH:          Triangular meshes (e.g. created from anuga.pmesh)
34NC            Model outputs for use as boundary conditions (e.g from MOST)
35
36
37AUTOMATICALLY CREATED FILES:
38
39ASC, PRJ  ->  DEM  ->  PTS: Conversion of DEM's to native pts file
40
41NC -> SWW: Conversion of MOST bundary files to boundary sww
42
43PTS + TSH -> TSH with elevation: Least squares fit
44
45TSH -> SWW:  Conversion of TSH to sww viewable using Swollen
46
47TSH + Boundary SWW -> SWW: Simluation using abstract_2d_finite_volumes
48
49"""

# This file was reverted from changeset:5484 to changeset:5470 on 10th July
# by Ole.

import os, sys
import csv
import exceptions
import string
import shutil
from struct import unpack
import array as p_array
from os import sep, path, remove, mkdir, access, F_OK, W_OK, getcwd

import Numeric as num

from Scientific.IO.NetCDF import NetCDFFile
from os.path import exists, basename, join
from os import getcwd

from anuga.coordinate_transforms.redfearn import redfearn, \
     convert_from_latlon_to_utm
from anuga.coordinate_transforms.geo_reference import Geo_reference, \
     write_NetCDF_georeference, ensure_geo_reference
from anuga.geospatial_data.geospatial_data import Geospatial_data, \
     ensure_absolute
from anuga.config import minimum_storable_height as \
     default_minimum_storable_height
from anuga.config import netcdf_mode_r, netcdf_mode_w, netcdf_mode_a
from anuga.config import max_float
from anuga.utilities.numerical_tools import ensure_numeric, mean
from anuga.caching.caching import myhash
from anuga.utilities.anuga_exceptions import ANUGAError
from anuga.shallow_water import Domain
from anuga.abstract_2d_finite_volumes.pmesh2domain import \
     pmesh_to_domain_instance
from anuga.abstract_2d_finite_volumes.util import get_revision_number, \
     remove_lone_verts, sww2timeseries, get_centroid_values

from anuga.abstract_2d_finite_volumes.neighbour_mesh import segment_midpoints
from anuga.load_mesh.loadASCII import export_mesh_file
from anuga.utilities.polygon import intersection, inside_polygon
from anuga.utilities.system_tools import get_vars_in_expression


# Default block size for sww2dem()
DEFAULT_BLOCK_SIZE = 10000

######
# Exception classes
######

class TitleValueError(exceptions.Exception): pass
class DataMissingValuesError(exceptions.Exception): pass
class DataFileNotOpenError(exceptions.Exception): pass
class DataTimeError(exceptions.Exception): pass
class DataDomainError(exceptions.Exception): pass
class NewQuantity(exceptions.Exception): pass


######
# formula mappings
######

quantity_formula = {'momentum':'(xmomentum**2 + ymomentum**2)**0.5',
                    'depth':'stage-elevation',
                    'speed': \
 '(xmomentum**2 + ymomentum**2)**0.5/(stage-elevation+1.e-6/(stage-elevation))'}


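# A minimal sketch (not part of the original source) of how these formula
# strings can be evaluated against quantity arrays, e.g. for 'depth':
#
#     stage = num.array([1.0, 2.0])
#     elevation = num.array([0.5, 0.5])
#     depth = eval(quantity_formula['depth'])    # -> array([0.5, 1.5])
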
##
# @brief Convert a possible filename into a standard form.
# @param s Filename to process.
# @return The new filename string.
def make_filename(s):
    """Transform argument string into a suitable filename
    """

    s = s.strip()
    s = s.replace(' ', '_')
    s = s.replace('(', '')
    s = s.replace(')', '')
    s = s.replace('__', '_')

    return s

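# Example (illustrative, not part of the original source):
#     make_filename(' My (test) file ')    # -> 'My_test_file'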

##
# @brief Check that a specified filesystem directory path exists.
# @param path The directory path to check.
# @param verbose True if this function is to be verbose.
# @note If directory path doesn't exist, it will be created.
def check_dir(path, verbose=None):
    """Check that specified path exists.
    If path does not exist it will be created if possible

    USAGE:
       check_dir(path, verbose)

    ARGUMENTS:
        path -- Directory
        verbose -- Flag verbose output (default: None)

    RETURN VALUE:
        Verified path including trailing separator
    """

    import os.path

    if sys.platform in ['nt', 'dos', 'win32', 'what else?']:
        unix = 0
    else:
        unix = 1

    # add terminal separator, if it's not already there
    if path[-1] != os.sep:
        path = path + os.sep

    # expand ~ or ~username in path
    path = os.path.expanduser(path)

    # create directory if required
    if not (os.access(path, os.R_OK | os.W_OK) or path == ''):
        try:
            exitcode = os.mkdir(path)

            # Change access rights if possible
            if unix:
                exitcode = os.system('chmod 775 ' + path)
            else:
                pass  # FIXME: What about access rights under Windows?

            if verbose: print 'MESSAGE: Directory', path, 'created.'
        except:
            print 'WARNING: Directory', path, 'could not be created.'
            if unix:
                path = '/tmp/'
            else:
                path = 'C:' + os.sep

            print "Using directory '%s' instead" % path

    return path


##
# @brief Delete directory and all sub-directories.
# @param path Path to the directory to delete.
def del_dir(path):
    """Recursively delete directory path and all its contents
    """

    if os.path.isdir(path):
        for file in os.listdir(path):
            X = os.path.join(path, file)

            if os.path.isdir(X) and not os.path.islink(X):
                del_dir(X)
            else:
                try:
                    os.remove(X)
                except:
                    print "Could not remove file %s" % X

        os.rmdir(path)


##
# @brief Remove a single filesystem object using a given removal function.
# @param path Path to the object to remove.
# @param func Function used to remove path (e.g. os.remove or os.rmdir).
# @param verbose True if this function is to be verbose.
# @note ANOTHER OPTION, IF NEEDED IN THE FUTURE, Nick B 7/2007
def rmgeneric(path, func, verbose=False):
    ERROR_STR = """Error removing %(path)s, %(error)s """

    try:
        func(path)
        if verbose: print 'Removed ', path
    except OSError, (errno, strerror):
        print ERROR_STR % {'path': path, 'error': strerror}


##
# @brief Remove directory and all sub-directories.
# @param path Filesystem path to directory to remove.
# @param verbose True if this function is to be verbose.
def removeall(path, verbose=False):
    if not os.path.isdir(path):
        return

    for x in os.listdir(path):
        fullpath = os.path.join(path, x)
        if os.path.isfile(fullpath):
            f = os.remove
            rmgeneric(fullpath, f, verbose)
        elif os.path.isdir(fullpath):
            removeall(fullpath, verbose)
            f = os.rmdir
            rmgeneric(fullpath, f, verbose)


##
# @brief Create a standard filename.
# @param datadir Directory where file is to be created.
# @param filename Filename 'stem'.
# @param format Format of the file, becomes filename extension.
# @param size Size of file, becomes part of filename.
# @param time Time (float), becomes part of filename.
# @return The complete filename path, including directory.
# @note The containing directory is created, if necessary.
def create_filename(datadir, filename, format, size=None, time=None):
    FN = check_dir(datadir) + filename

    if size is not None:
        FN += '_size%d' % size

    if time is not None:
        FN += '_time%.2f' % time

    FN += '.' + format

    return FN

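# Example (illustrative, not part of the original source):
#     create_filename('output', 'run', 'sww', size=100, time=2.5)
#     # -> 'output/run_size100_time2.50.sww' on unix, since check_dir()
#     # returns the directory with a trailing separator.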

##
# @brief Get all files with a standard name and a given set of attributes.
# @param datadir Directory files must be in.
# @param filename Filename stem.
# @param format Filename extension.
# @param size Filename size.
# @return A list of filenames (including directory) that match the attributes.
def get_files(datadir, filename, format, size):
    """Get all file (names) with given name, size and format
    """

    import glob

    dir = check_dir(datadir)
    pattern = dir + filename + '_size%d*.%s' % (size, format)

    return glob.glob(pattern)


##
# @brief Generic class for storing output to e.g. visualisation or checkpointing
class Data_format:
    """Generic interface to data formats
    """

    ##
    # @brief Instantiate this instance.
    # @param domain The domain this instance works with.
    # @param extension Extension (and format) of the underlying file.
    # @param mode The mode of the underlying file.
    def __init__(self, domain, extension, mode=netcdf_mode_w):
        assert mode[0] in ['r', 'w', 'a'], \
               "Mode %s must be either:\n" % mode + \
               "   'w' (write)\n" + \
               "   'r' (read)\n" + \
               "   'a' (append)"

        #Create filename
        self.filename = create_filename(domain.get_datadir(),
                                        domain.get_name(), extension)

        self.timestep = 0
        self.domain = domain

        # Exclude ghosts in case this is a parallel domain
        self.number_of_nodes = domain.number_of_full_nodes
        self.number_of_volumes = domain.number_of_full_triangles
        #self.number_of_volumes = len(domain)

        #FIXME: Should we have a general set_precision function?


##
# @brief Class for storing output to e.g. visualisation
class Data_format_sww(Data_format):
    """Interface to native NetCDF format (.sww) for storing model output

    There are two kinds of data

    1: Constant data: Vertex coordinates and field values. Stored once
    2: Variable data: Conserved quantities. Stored once per timestep.

    All data is assumed to reside at vertex locations.
    """

    ##
    # @brief Instantiate this instance.
    # @param domain The domain to store data for.
    # @param mode Mode of the underlying data file.
    # @param max_size Maximum size (bytes) the data file may grow to.
    # @param recursion Recursion level; used when a new file is started
    #        because the previous one grew too large (see store_timestep).
    # @note Prepare the underlying data file if mode starts with 'w'.
    def __init__(self, domain, mode=netcdf_mode_w, max_size=2000000000,
                 recursion=False):
        from Scientific.IO.NetCDF import NetCDFFile

        self.precision = num.Float32 #Use single precision for quantities
        self.recursion = recursion
        self.mode = mode
        if hasattr(domain, 'max_size'):
            self.max_size = domain.max_size #file size max is 2Gig
        else:
            self.max_size = max_size
        if hasattr(domain, 'minimum_storable_height'):
            self.minimum_storable_height = domain.minimum_storable_height
        else:
            self.minimum_storable_height = default_minimum_storable_height

        # call owning constructor
        Data_format.__init__(self, domain, 'sww', mode)

        # NetCDF file definition
        fid = NetCDFFile(self.filename, mode)
        if mode[0] == 'w':
            description = 'Output from anuga.abstract_2d_finite_volumes ' \
                          'suitable for plotting'
            self.writer = Write_sww()
            self.writer.store_header(fid,
                                     domain.starttime,
                                     self.number_of_volumes,
                                     self.domain.number_of_full_nodes,
                                     description=description,
                                     smoothing=domain.smooth,
                                     order=domain.default_order,
                                     sww_precision=self.precision)

            # Extra optional information
            if hasattr(domain, 'texture'):
                fid.texture = domain.texture

            if domain.quantities_to_be_monitored is not None:
                fid.createDimension('singleton', 1)
                fid.createDimension('two', 2)

                poly = domain.monitor_polygon
                if poly is not None:
                    N = len(poly)
                    fid.createDimension('polygon_length', N)
                    fid.createVariable('extrema.polygon',
                                       self.precision,
                                       ('polygon_length', 'two'))
                    fid.variables['extrema.polygon'][:] = poly

                interval = domain.monitor_time_interval
                if interval is not None:
                    fid.createVariable('extrema.time_interval',
                                       self.precision,
                                       ('two',))
                    fid.variables['extrema.time_interval'][:] = interval

                for q in domain.quantities_to_be_monitored:
                    fid.createVariable(q + '.extrema', self.precision,
                                       ('numbers_in_range',))
                    fid.createVariable(q + '.min_location', self.precision,
                                       ('numbers_in_range',))
                    fid.createVariable(q + '.max_location', self.precision,
                                       ('numbers_in_range',))
                    fid.createVariable(q + '.min_time', self.precision,
                                       ('singleton',))
                    fid.createVariable(q + '.max_time', self.precision,
                                       ('singleton',))

        fid.close()

    ##
    # @brief Store connectivity data into the underlying data file.
    def store_connectivity(self):
        """Specialisation of store_connectivity for NetCDF format

        Writes x,y,z coordinates of triangles constituting
        the bed elevation.
        """

        from Scientific.IO.NetCDF import NetCDFFile

        domain = self.domain

        # append to the NetCDF file
        fid = NetCDFFile(self.filename, netcdf_mode_a)

#        # Get the variables
#        x = fid.variables['x']
#        y = fid.variables['y']
#        z = fid.variables['elevation']
#        volumes = fid.variables['volumes']

        # Get X, Y and bed elevation Z
        Q = domain.quantities['elevation']
        X,Y,Z,V = Q.get_vertex_values(xy=True, precision=self.precision)

        # store the connectivity data
        points = num.concatenate( (X[:,num.NewAxis],Y[:,num.NewAxis]), axis=1 )
        self.writer.store_triangulation(fid,
                                        points,
#                                        V.astype(volumes.typecode()),
                                        V.astype(num.Float32),
                                        Z,
                                        points_georeference=\
                                            domain.geo_reference)

        fid.close()

    ##
    # @brief Store time and named quantities to the underlying data file.
    # @param names The names of the quantities to store.
    # @note If 'names' not supplied, store a standard set of quantities.
    def store_timestep(self, names=None):
        """Store time and named quantities to file
        """

        from Scientific.IO.NetCDF import NetCDFFile
        import types
        from time import sleep
        from os import stat

        if names is None:
            # Standard shallow water wave equation quantities in ANUGA
            names = ['stage', 'xmomentum', 'ymomentum']

        # Get NetCDF
        retries = 0
        file_open = False
        while not file_open and retries < 10:
            try:
                fid = NetCDFFile(self.filename, netcdf_mode_a) # Open existing file
            except IOError:
                # This could happen if someone was reading the file.
                # In that case, wait a while and try again
                msg = 'Warning (store_timestep): File %s could not be opened' \
                      % self.filename
                msg += ' - trying step %s again' % self.domain.time
                print msg
                retries += 1
                sleep(1)
            else:
                file_open = True

        if not file_open:
            msg = 'File %s could not be opened for append' % self.filename
            raise DataFileNotOpenError, msg

        # Check to see if the file is already too big:
        time = fid.variables['time']
        i = len(time) + 1
        file_size = stat(self.filename)[6]
        file_size_increase = file_size / i
        if file_size + file_size_increase > self.max_size * 2**self.recursion:
            # In order to get the file name and start time correct,
            # I change the domain.filename and domain.starttime.
            # This is the only way to do this without changing
            # other modules (I think).

            # Write a filename addon that won't break Swollen's reader
            # (10.sww is bad)
            filename_ext = '_time_%s' % self.domain.time
            filename_ext = filename_ext.replace('.', '_')

            # Remember the old filename, then give domain a
            # name with the extension
            old_domain_filename = self.domain.get_name()
            if not self.recursion:
                self.domain.set_name(old_domain_filename + filename_ext)

            # Change the domain starttime to the current time
            old_domain_starttime = self.domain.starttime
            self.domain.starttime = self.domain.time

            # Build a new data_structure.
            next_data_structure = Data_format_sww(self.domain, mode=self.mode,
                                                  max_size=self.max_size,
                                                  recursion=self.recursion+1)
            if not self.recursion:
                print '    file_size = %s' % file_size
                print '    saving file to %s' % next_data_structure.filename

            # set up the new data_structure
            self.domain.writer = next_data_structure

            # FIXME - could be cleaner to use domain.store_timestep etc.
            next_data_structure.store_connectivity()
            next_data_structure.store_timestep(names)
            fid.sync()
            fid.close()

            # restore the old starttime and filename
            self.domain.starttime = old_domain_starttime
            self.domain.set_name(old_domain_filename)
        else:
            self.recursion = False
            domain = self.domain

            # Get the variables
            time = fid.variables['time']
            stage = fid.variables['stage']
            xmomentum = fid.variables['xmomentum']
            ymomentum = fid.variables['ymomentum']
            i = len(time)
            if type(names) not in [types.ListType, types.TupleType]:
                names = [names]

            if 'stage' in names \
               and 'xmomentum' in names \
               and 'ymomentum' in names:
                # Get stage, elevation, depth and select only those
                # values where minimum_storable_height is exceeded
                Q = domain.quantities['stage']
                A, _ = Q.get_vertex_values(xy=False, precision=self.precision)
                z = fid.variables['elevation']

                storable_indices = (A-z[:] >= self.minimum_storable_height)
                stage = num.choose(storable_indices, (z[:], A))

                # Define a zero vector of same size and type as A
                # for use with momenta
                null = num.zeros(num.size(A), A.typecode())

                # Get xmomentum where depth exceeds minimum_storable_height
                Q = domain.quantities['xmomentum']
                xmom, _ = Q.get_vertex_values(xy=False,
                                              precision=self.precision)
                xmomentum = num.choose(storable_indices, (null, xmom))

                # Get ymomentum where depth exceeds minimum_storable_height
                Q = domain.quantities['ymomentum']
                ymom, _ = Q.get_vertex_values(xy=False,
                                              precision=self.precision)
                ymomentum = num.choose(storable_indices, (null, ymom))

                # Write quantities to underlying data file
                self.writer.store_quantities(fid,
                                             time=self.domain.time,
                                             sww_precision=self.precision,
                                             stage=stage,
                                             xmomentum=xmomentum,
                                             ymomentum=ymomentum)
            else:
                msg = 'Quantities stored must be: stage, xmomentum, ymomentum, '
                msg += 'but I got: ' + str(names)
                raise Exception, msg

            # Update extrema if requested
            domain = self.domain
            if domain.quantities_to_be_monitored is not None:
                for q, info in domain.quantities_to_be_monitored.items():
                    if info['min'] is not None:
                        fid.variables[q + '.extrema'][0] = info['min']
                        fid.variables[q + '.min_location'][:] = \
                                        info['min_location']
                        fid.variables[q + '.min_time'][0] = info['min_time']

                    if info['max'] is not None:
                        fid.variables[q + '.extrema'][1] = info['max']
                        fid.variables[q + '.max_location'][:] = \
                                        info['max_location']
                        fid.variables[q + '.max_time'][0] = info['max_time']

            # Flush and close
            fid.sync()
            fid.close()


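# A minimal usage sketch (not part of the original source): a simulation
# typically obtains one of these writers via get_dataobject(domain) below,
# stores the mesh once, then appends each timestep, e.g.
#
#     writer = Data_format_sww(domain)
#     writer.store_connectivity()
#     writer.store_timestep(['stage', 'xmomentum', 'ymomentum'])
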
##
# @brief Class for handling checkpoint data
class Data_format_cpt(Data_format):
    """Interface to native NetCDF format (.cpt)
    """

    ##
    # @brief Initialize this instantiation.
    # @param domain The domain to store checkpoint data for.
    # @param mode Mode of underlying data file (default WRITE).
    def __init__(self, domain, mode=netcdf_mode_w):
        from Scientific.IO.NetCDF import NetCDFFile

        self.precision = num.Float #Use full precision

        Data_format.__init__(self, domain, 'cpt', mode)

        # NetCDF file definition
        fid = NetCDFFile(self.filename, mode)
        if mode[0] == 'w':
            #Create new file
            fid.institution = 'Geoscience Australia'
            fid.description = 'Checkpoint data'
            #fid.smooth = domain.smooth
            fid.order = domain.default_order

            # dimension definitions
            fid.createDimension('number_of_volumes', self.number_of_volumes)
            fid.createDimension('number_of_vertices', 3)

            #Store info at all vertices (no smoothing)
            fid.createDimension('number_of_points', 3*self.number_of_volumes)
            fid.createDimension('number_of_timesteps', None) #extensible

            # variable definitions

            #Mesh
            fid.createVariable('x', self.precision, ('number_of_points',))
            fid.createVariable('y', self.precision, ('number_of_points',))


            fid.createVariable('volumes', num.Int, ('number_of_volumes',
                                                    'number_of_vertices'))

            fid.createVariable('time', self.precision, ('number_of_timesteps',))

            #Allocate space for all quantities
            for name in domain.quantities.keys():
                fid.createVariable(name, self.precision,
                                   ('number_of_timesteps',
                                    'number_of_points'))

        fid.close()

    ##
    # @brief Store connectivity data to underlying data file.
    def store_checkpoint(self):
        """Write x,y coordinates and connectivity of triangles,
        constituting the bed elevation.
        """

        from Scientific.IO.NetCDF import NetCDFFile

        domain = self.domain

        #Get NetCDF
        fid = NetCDFFile(self.filename, netcdf_mode_a)

        # Get the variables
        x = fid.variables['x']
        y = fid.variables['y']
        z = fid.variables['elevation']

        volumes = fid.variables['volumes']

        # Get X, Y and bed elevation Z
        Q = domain.quantities['elevation']
        X,Y,Z,V = Q.get_vertex_values(xy=True, precision=self.precision)

        x[:] = X.astype(self.precision)
        y[:] = Y.astype(self.precision)
        z[:] = Z.astype(self.precision)

        volumes[:] = V

        fid.close()

    ##
    # @brief Store time and named quantity to underlying data file.
    # @param name Name of the quantity to store.
    def store_timestep(self, name):
        """Store time and named quantity to file
        """

        from Scientific.IO.NetCDF import NetCDFFile
        from time import sleep

        #Get NetCDF
        retries = 0
        file_open = False
        while not file_open and retries < 10:
            try:
                fid = NetCDFFile(self.filename, netcdf_mode_a)
            except IOError:
                #This could happen if someone was reading the file.
                #In that case, wait a while and try again
                msg = 'Warning (store_timestep): File %s could not be opened' \
                      ' - trying again' % self.filename
                print msg
                retries += 1
                sleep(1)
            else:
                file_open = True

        if not file_open:
            msg = 'File %s could not be opened for append' % self.filename
            raise DataFileNotOpenError, msg

        domain = self.domain

        # Get the variables
        time = fid.variables['time']
        var = fid.variables[name]    # variable holding the named quantity
        i = len(time)

        # Store time
        time[i] = self.domain.time

        # Get quantity
        Q = domain.quantities[name]
        A,V = Q.get_vertex_values(xy=False, precision=self.precision)

        var[i,:] = A.astype(self.precision)

        #Flush and close
        fid.sync()
        fid.close()


##
# @brief Class for National Exposure Database storage (NEXIS).

LAT_TITLE = 'LATITUDE'
LONG_TITLE = 'LONGITUDE'
X_TITLE = 'x'
Y_TITLE = 'y'

class Exposure_csv:

    ##
    # @brief Instantiate this instance.
    # @param file_name Name of underlying data file.
    # @param latitude_title Title of the latitude column.
    # @param longitude_title Title of the longitude column.
    # @param is_x_y_locations True if locations are given as x,y pairs.
    # @param x_title Title of the x column.
    # @param y_title Title of the y column.
    # @param refine_polygon Unused.
    # @param title_check_list List of titles that must be in the file.
    def __init__(self, file_name, latitude_title=LAT_TITLE,
                 longitude_title=LONG_TITLE, is_x_y_locations=None,
                 x_title=X_TITLE, y_title=Y_TITLE,
                 refine_polygon=None, title_check_list=None):
        """
        This class is for handling the exposure csv file.
        It reads the file in and converts the lats and longs to a geospatial
        data object.
        Use the methods to read and write columns.

        The format of the csv files it reads is:
           The first row is a title row.
           Commas are the delimiters.
           Each column is a 'set' of data.

        Feel free to use/expand it to read other csv files.

        It is not for adding and deleting rows.

        Can geospatial handle string attributes? It's not made for them.
        Currently it can't load and save string att's.

        So just use geospatial to hold the x, y and georef? Bad, since
        different att's are in different structures.  Not so bad, the info
        to write if the .csv file is saved is in attribute_dic

        The location info is in the geospatial attribute.
        """

        self._file_name = file_name
        self._geospatial = None

        # self._attribute_dic is a dictionary.
        # The keys are the column titles.
        # The values are lists of column data.

        # self._title_index_dic is a dictionary.
        # The keys are the column titles.
        # The values are the index positions of file columns.
        self._attribute_dic, self._title_index_dic = \
            csv2dict(self._file_name, title_check_list=title_check_list)
        try:
            # Have code here that handles caps or lower
            lats = self._attribute_dic[latitude_title]
            longs = self._attribute_dic[longitude_title]
        except KeyError:
            # Lat/long columns are missing; fall back to x,y locations
            # unless the caller explicitly said they are not used.
            if is_x_y_locations is not False:
                is_x_y_locations = True
        else:
            self._geospatial = Geospatial_data(latitudes=lats,
                                               longitudes=longs)

        if is_x_y_locations is True:
            if self._geospatial is not None:
                pass    # FIXME: throw an error
            try:
                xs = self._attribute_dic[x_title]
                ys = self._attribute_dic[y_title]
                points = [[float(i),float(j)] for i,j in map(None,xs,ys)]
            except KeyError:
                msg = "Could not find location information."
                raise TitleValueError, msg
            else:
                self._geospatial = Geospatial_data(data_points=points)

        # create a list of points that are in the refining_polygon
        # described by a list of indexes representing the points

    ##
    # @brief Compare this object with another.
    # @param self This object.
    # @param other The other object.
    # @return 0 if objects are the 'same', non-zero otherwise.
    def __cmp__(self, other):
        #check that 'other' is an instance of this class
        if isinstance(self, type(other)):
            result = cmp(self._attribute_dic, other._attribute_dic)
            if result != 0:
                return result

            # The order of the columns is important. Therefore..
            result = cmp(self._title_index_dic, other._title_index_dic)
            if result != 0:
                return result
            for self_ls, other_ls in map(None, self._attribute_dic,
                                         other._attribute_dic):
                result = cmp(self._attribute_dic[self_ls],
                             other._attribute_dic[other_ls])
                if result != 0:
                    return result
            return 0
        else:
            return 1

    ##
    # @brief Get a list of column values given a column name.
    # @param column_name The name of the column to get values from.
    # @param use_refind_polygon Unused - not implemented.
    def get_column(self, column_name, use_refind_polygon=False):
        """
        Given a column name return a list of the column values.

        Note, the type of the values will be String!
        Do this to change a list of strings to a list of floats:
            time = [float(x) for x in time]

        Not implemented:
        if use_refind_polygon is True, only return values in the
        refined polygon.
        """

        if not self._attribute_dic.has_key(column_name):
            msg = 'There is no column called %s!' % column_name
            raise TitleValueError, msg

        return self._attribute_dic[column_name]

    ##
    # @brief Not implemented - placeholder for column value interpolation.
    # @param value_column_name Column to interpolate values from.
    # @param known_column_name Column holding the known values.
    # @param known_values The known values.
    # @param use_refind_polygon Unused - not implemented.
    def get_value(self, value_column_name, known_column_name,
                  known_values, use_refind_polygon=False):
        """
        Do linear interpolation on the known_column_name column, using the
        known_values, to return a value of the value_column_name column.
        """

        pass

    ##
    # @brief Get a geospatial object that describes the locations.
    # @param use_refind_polygon Unused - not implemented.
    def get_location(self, use_refind_polygon=False):
        """
        Return a geospatial object which describes the
        locations of the location file.

        Note, if there is no location info, this returns None.

        Not implemented:
        if use_refind_polygon is True, only return values in the
        refined polygon.
        """

        return self._geospatial

    ##
    # @brief Add column to 'end' of CSV data.
    # @param column_name The new column name.
    # @param column_values The new column values.
    # @param overwrite If True, overwrites last column, doesn't add at end.
    def set_column(self, column_name, column_values, overwrite=False):
        """
        Add a column to the 'end' (with the right most column being the end)
        of the csv file.

        Set overwrite to True if you want to overwrite a column.

        Note, in column_name white space is removed and case is not checked.

        Precondition:
        The column_name and column_values cannot have commas in them.
        """

        # sanity checks
        value_row_count = \
                len(self._attribute_dic[self._title_index_dic.keys()[0]])
        if len(column_values) != value_row_count:
            msg = 'The number of column values must equal the number of rows.'
            raise DataMissingValuesError, msg

        # check new column name isn't already used, and we aren't overwriting
        if self._attribute_dic.has_key(column_name):
            if not overwrite:
                msg = 'Column name %s already in use!' % column_name
                raise TitleValueError, msg
        else:
            # New title.  Add it to the title index.
            self._title_index_dic[column_name] = len(self._title_index_dic)

        self._attribute_dic[column_name] = column_values

    ##
    # @brief Save the exposure CSV file.
    # @param file_name If supplied, use this filename, not original.
    def save(self, file_name=None):
        """
        Save the exposure csv file
        """

        if file_name is None:
            file_name = self._file_name

        fd = open(file_name, 'wb')
        writer = csv.writer(fd)

        # Write the title to a csv file
        line = [None] * len(self._title_index_dic)
        for title in self._title_index_dic.iterkeys():
            line[self._title_index_dic[title]] = title
        writer.writerow(line)

        # Write the values to a csv file
        value_row_count = \
                len(self._attribute_dic[self._title_index_dic.keys()[0]])
        for row_i in range(value_row_count):
            line = [None] * len(self._title_index_dic)
            for title in self._title_index_dic.iterkeys():
                line[self._title_index_dic[title]] = \
                     self._attribute_dic[title][row_i]
            writer.writerow(line)

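# A minimal usage sketch (not part of the original source), assuming a file
# 'exposure.csv' with LATITUDE, LONGITUDE and a hypothetical DEPTH column:
#
#     exposure = Exposure_csv('exposure.csv')
#     depths = exposure.get_column('DEPTH')    # values are strings
#     exposure.set_column('DEPTH_m', [float(d) for d in depths])
#     exposure.save('exposure_with_depth.csv')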

def csv2building_polygons(file_name,
                          floor_height=3,
                          clipping_polygons=None):
    """
    Convert CSV files of the form:

    easting,northing,id,floors
    422664.22,870785.46,2,0
    422672.48,870780.14,2,0
    422668.17,870772.62,2,0
    422660.35,870777.17,2,0
    422664.22,870785.46,2,0
    422661.30,871215.06,3,1
    422667.50,871215.70,3,1
    422668.30,871204.86,3,1
    422662.21,871204.33,3,1
    422661.30,871215.06,3,1

    to a dictionary of polygons with id as key.
    The associated number of floors is converted to metres above MSL and
    returned as a separate dictionary, also keyed by id.

    Optional parameter floor_height is the height of each building story.
    Optional parameter clipping_polygons is a list of polygons selecting
    buildings. Any building not in these polygons will be omitted.

    See csv2polygons for more details
    """

    polygons, values = csv2polygons(file_name,
                                    value_name='floors',
                                    clipping_polygons=clipping_polygons)

    heights = {}
    for key in values.keys():
        v = float(values[key])
        heights[key] = v*floor_height

    return polygons, heights


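# Example (illustrative, not part of the original source), using the docstring
# data above:
#
#     polygons, heights = csv2building_polygons('buildings.csv')
#     # polygons['3'] -> list of [easting, northing] vertex pairs
#     # heights['3']  -> 3.0 (1 floor * default floor_height of 3 m)
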
##
# @brief Convert CSV file into a dictionary of polygons and associated values.
# @param file_name The path to the file to read.
# @param value_name Name of the 4th column (default 'value').
# @param clipping_polygons Optional list of polygons used to select polygons.
def csv2polygons(file_name,
                 value_name='value',
                 clipping_polygons=None):
    """
    Convert CSV files of the form:

    easting,northing,id,value
    422664.22,870785.46,2,0
    422672.48,870780.14,2,0
    422668.17,870772.62,2,0
    422660.35,870777.17,2,0
    422664.22,870785.46,2,0
    422661.30,871215.06,3,1
    422667.50,871215.70,3,1
    422668.30,871204.86,3,1
    422662.21,871204.33,3,1
    422661.30,871215.06,3,1

    to a dictionary of polygons with id as key.
    The associated values are returned as a separate dictionary also keyed by id.


    easting: x coordinate relative to zone implied by the model
    northing: y coordinate relative to zone implied by the model
    id: tag for polygon comprising points with this tag
    value: numeral associated with each polygon. These must be the same for all
           points in each polygon.

    The last header, value, can take on other names such as roughness, floors,
    etc - or it can be omitted in which case the returned values will be None.

    Eastings and Northings will be returned as floating point values while
    id and values will be returned as strings.

    Optional argument: clipping_polygons will select only those polygons that
    are fully within one or more of the clipping_polygons. In other words any
    polygon from the csv file which has at least one point not inside one of
    the clipping polygons will be excluded.

    See underlying function csv2dict for more details.
    """

    X, _ = csv2dict(file_name)

    msg = 'Polygon csv file must have 3 or 4 columns'
    assert len(X.keys()) in [3, 4], msg

    msg = 'Did not find expected column header: easting'
    assert 'easting' in X.keys(), msg

    msg = 'Did not find expected column header: northing'
    assert 'northing' in X.keys(), msg

    msg = 'Did not find expected column header: id'
    assert 'id' in X.keys(), msg

    if value_name is not None:
        msg = 'Did not find expected column header: %s' % value_name
        assert value_name in X.keys(), msg

    polygons = {}
    if len(X.keys()) == 4:
        values = {}
    else:
        values = None

    # Loop through entries and compose polygons
    excluded_polygons = {}
    past_ids = {}
    last_id = None
    for i, id in enumerate(X['id']):

        # Check for duplicate polygons
        if id in past_ids:
            msg = 'Polygon %s was duplicated in line %d' % (id, i)
            raise Exception, msg

        if id not in polygons:
            # Start new polygon
            polygons[id] = []
            if values is not None:
                values[id] = X[value_name][i]

            # Keep track of previous polygon ids
            if last_id is not None:
                past_ids[last_id] = i

        # Append this point to current polygon
        point = [float(X['easting'][i]), float(X['northing'][i])]

        if clipping_polygons is not None:
            exclude = True
            for clipping_polygon in clipping_polygons:
                if inside_polygon(point, clipping_polygon):
                    exclude = False
                    break

            if exclude is True:
                excluded_polygons[id] = True

        polygons[id].append(point)

        # Check that value is the same across each polygon
        if values is not None:
            msg = 'Values must be the same across each polygon. '
            msg += 'I got %s in line %d but it should have been %s' \
                   % (X[value_name][i], i, values[id])
            assert values[id] == X[value_name][i], msg

        last_id = id

    # Weed out polygons that were not wholly inside clipping polygons
    for id in excluded_polygons:
        del polygons[id]

    return polygons, values


##
# @brief Convert CSV file to a dictionary of arrays.
# @param file_name The path to the file to read.
def csv2array(file_name):
    """
    Convert CSV files of the form:

    time, discharge, velocity
    0.0,  1.2,       0.0
    0.1,  3.2,       1.1
    ...

    to a dictionary of numeric arrays.


    See underlying function csv2dict for more details.
    """

    X, _ = csv2dict(file_name)

    Y = {}
    for key in X.keys():
        Y[key] = num.array([float(x) for x in X[key]])

    return Y

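# Example (illustrative, not part of the original source), using the docstring
# data above:
#
#     data = csv2array('hydrograph.csv')
#     data['discharge']    # -> num.array([1.2, 3.2, ...])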

##
# @brief Read a CSV file and convert to a dictionary of {key: column}.
# @param file_name The path to the file to read.
# @param title_check_list List of titles that *must* be columns in the file.
# @return Two dicts: ({key:column}, {title:index}).
# @note WARNING: Values are returned as strings.
def csv2dict(file_name, title_check_list=None):
    """
    Load in the csv as a dictionary, title as key and column info as value.
    Also, create a dictionary, title as key and column index as value,
    to keep track of the column order.

    Two dictionaries are returned.

    WARNING: Values are returned as strings.
             Do this to change a list of strings to a list of floats
                 time = [float(x) for x in time]
    """

    # FIXME(Ole): Consider dealing with files without headers
    # FIXME(Ole): Consider a wrapper automatically converting text fields
    #             to the right type by trying for: int, float, string

    attribute_dic = {}
    title_index_dic = {}
    titles_stripped = [] # List of titles

    reader = csv.reader(file(file_name))

    # Read in and manipulate the title info
    titles = reader.next()
    for i, title in enumerate(titles):
        header = title.strip()
        titles_stripped.append(header)
        title_index_dic[header] = i
    title_count = len(titles_stripped)

    # Check required columns
    if title_check_list is not None:
        for title_check in title_check_list:
            if not title_index_dic.has_key(title_check):
                msg = 'Reading error. This column is not present: %s' \
                      % title_check
                raise IOError, msg

    # Create a dictionary of column values, indexed by column title
    for line in reader:
        n = len(line) # Number of entries
        if n != title_count:
            msg = 'Entry in file %s had %d columns ' % (file_name, n)
            msg += 'although there were %d headers' % title_count
            raise IOError, msg
        for i, value in enumerate(line):
            attribute_dic.setdefault(titles_stripped[i], []).append(value)

    return attribute_dic, title_index_dic

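# Example (illustrative, not part of the original source):
#
#     attributes, title_indices = csv2dict('gauges.csv',
#                                          title_check_list=['easting'])
#     eastings = [float(x) for x in attributes['easting']]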

##
# @brief Store x,y,z vectors into an OBJ file.
# @param filename Path of file to write ('.obj' extension added if absent).
# @param x X coordinate vectors, dimension (M,3).
# @param y Y coordinate vectors, dimension (M,3).
# @param z Z coordinate vectors, dimension (M,3).
def write_obj(filename, x, y, z):
    """Store x,y,z vectors into filename (obj format).

       Vectors are assumed to have dimension (M,3) where
       M corresponds to the number of elements.
       Triangles are assumed to be disconnected.

       The three numbers in each vector correspond to three vertices,

       e.g. the x coordinate of vertex 1 of element i is in x[i,1]
    """

    import os.path

    root, ext = os.path.splitext(filename)
    if ext == '.obj':
        FN = filename
    else:
        FN = filename + '.obj'

    outfile = open(FN, 'wb')
    outfile.write("# Triangulation as an obj file\n")

    M, N = x.shape
    assert N == 3  #Assuming three vertices per element

    for i in range(M):
        for j in range(N):
            outfile.write("v %f %f %f\n" % (x[i,j], y[i,j], z[i,j]))

    for i in range(M):
        base = i * N
        outfile.write("f %d %d %d\n" % (base+1, base+2, base+3))

    outfile.close()

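# Example (illustrative, not part of the original source): write a single
# flat triangle to 'triangle.obj'.
#
#     x = num.array([[0.0, 1.0, 0.0]])
#     y = num.array([[0.0, 0.0, 1.0]])
#     z = num.array([[0.0, 0.0, 0.0]])
#     write_obj('triangle', x, y, z)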

#########################################################
# Conversion routines
#########################################################

##
# @brief Convert SWW data to OBJ data.
# @param basefilename Stem of filename, needs size and extension added.
# @param size The number of lines to write.
def sww2obj(basefilename, size):
    """Convert netcdf based data output to obj
    """

    from Scientific.IO.NetCDF import NetCDFFile

    # Get NetCDF
    FN = create_filename('.', basefilename, 'sww', size)
    print 'Reading from ', FN
    fid = NetCDFFile(FN, netcdf_mode_r)  #Open existing file for read

    # Get the variables
    x = fid.variables['x']
    y = fid.variables['y']
    z = fid.variables['elevation']
    time = fid.variables['time']
    stage = fid.variables['stage']

    M = size  #Number of lines
    xx = num.zeros((M,3), num.Float)
    yy = num.zeros((M,3), num.Float)
    zz = num.zeros((M,3), num.Float)

    for i in range(M):
        for j in range(3):
            xx[i,j] = x[i+j*M]
            yy[i,j] = y[i+j*M]
            zz[i,j] = z[i+j*M]

    # Write obj for bathymetry
    FN = create_filename('.', basefilename, 'obj', size)
    write_obj(FN,xx,yy,zz)

    # Now read all the data with variable information, combine with
    # x,y info and store as obj
    for k in range(len(time)):
        t = time[k]
        print 'Processing timestep %f' % t

        for i in range(M):
            for j in range(3):
                zz[i,j] = stage[k,i+j*M]

        #Write obj for variable data
        #FN = create_filename(basefilename, 'obj', size, time=t)
        FN = create_filename('.', basefilename[:5], 'obj', size, time=t)
        write_obj(FN, xx, yy, zz)


##
# @brief Convert line based data output to OBJ data.
# @param basefilename Stem of filename, needs size and extension added.
def dat2obj(basefilename):
    """Convert line based data output to obj
    FIXME: Obsolete?
    """

    import glob, os
    from anuga.config import data_dir

    # Get bathymetry and x,y's
    lines = open(data_dir+os.sep+basefilename+'_geometry.dat', 'r').readlines()

    M = len(lines)  #Number of lines
    x = num.zeros((M,3), num.Float)
    y = num.zeros((M,3), num.Float)
    z = num.zeros((M,3), num.Float)

    for i, line in enumerate(lines):
        tokens = line.split()
        values = map(float, tokens)

        for j in range(3):
            x[i,j] = values[j*3]
            y[i,j] = values[j*3+1]
            z[i,j] = values[j*3+2]

    # Write obj for bathymetry
    write_obj(data_dir + os.sep + basefilename + '_geometry', x, y, z)

    # Now read all the data files with variable information, combine with
    # x,y info and store as obj.

    files = glob.glob(data_dir + os.sep + basefilename + '*.dat')
    for filename in files:
        print 'Processing %s' % filename

        # Note: glob returns paths that already include data_dir
        lines = open(filename, 'r').readlines()
        assert len(lines) == M
        root, ext = os.path.splitext(filename)

        # Get time from filename
        i0 = filename.find('_time=')
        if i0 == -1:
            #Skip bathymetry file
            continue

        i0 += 6  #Position where time starts
        i1 = filename.find('.dat')

        if i1 > i0:
            t = float(filename[i0:i1])
        else:
            raise DataTimeError, 'Hmmmm'

        for i, line in enumerate(lines):
            tokens = line.split()
            values = map(float,tokens)

            for j in range(3):
                z[i,j] = values[j]

        # Write obj for variable data
        write_obj(data_dir + os.sep + basefilename + '_time=%.4f' % t, x, y, z)


##
# @brief Filter data file, selecting timesteps first:step:last.
# @param filename1 Data file to filter.
# @param filename2 File to write filtered timesteps to.
# @param first First timestep.
# @param last Last timestep.
# @param step Timestep stride.
def filter_netcdf(filename1, filename2, first=0, last=None, step=1):
    """Read netcdf filename1, pick timesteps first:step:last and save to
    netcdf file filename2
    """

    from Scientific.IO.NetCDF import NetCDFFile

    # Get NetCDF
    infile = NetCDFFile(filename1, netcdf_mode_r)  #Open existing file for read
    outfile = NetCDFFile(filename2, netcdf_mode_w)  #Open new file

    # Copy dimensions
    for d in infile.dimensions:
        outfile.createDimension(d, infile.dimensions[d])

    # Copy variable definitions
    for name in infile.variables:
        var = infile.variables[name]
        outfile.createVariable(name, var.typecode(), var.dimensions)

    # Copy the static variables
    for name in infile.variables:
        if name == 'time' or name == 'stage':
            pass
        else:
            outfile.variables[name][:] = infile.variables[name][:]

    # Copy selected timesteps
    time = infile.variables['time']
    stage = infile.variables['stage']

    newtime = outfile.variables['time']
    newstage = outfile.variables['stage']

    if last is None:
        last = len(time)

    selection = range(first, last, step)
    for i, j in enumerate(selection):
        print 'Copying timestep %d of %d (%f)' % (j, last-first, time[j])
        newtime[i] = time[j]
        newstage[i,:] = stage[j,:]

    # Close
    infile.close()
    outfile.close()

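# Example (illustrative, not part of the original source): keep every second
# timestep of the first 100.
#
#     filter_netcdf('run.sww', 'run_filtered.sww', first=0, last=100, step=2)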

##
# @brief Return instance of class of given format using filename.
# @param domain Data domain; domain.format selects the class (e.g. 'sww').
# @param mode The mode to open domain in.
# @return A class instance of required domain and mode.
# Get data objects
def get_dataobject(domain, mode=netcdf_mode_w):
    """Return instance of class of given format using filename
    """

    cls = eval('Data_format_%s' % domain.format)
    return cls(domain, mode)

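# Note the eval-based dispatch above: a domain whose format attribute is
# 'sww' yields Data_format_sww, 'cpt' yields Data_format_cpt. For example:
#
#     writer = get_dataobject(domain)    # Data_format_sww when format == 'sww'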

##
# @brief Convert DEM data to PTS data.
# @param basename_in Stem of input filename.
# @param basename_out Stem of output filename.
# @param easting_min Minimum easting of clipped region.
# @param easting_max Maximum easting of clipped region.
# @param northing_min Minimum northing of clipped region.
# @param northing_max Maximum northing of clipped region.
# @param use_cache True if caching is to be used.
# @param verbose True if this function is to be verbose.
# @return The result of the underlying call to _dem2pts.
def dem2pts(basename_in, basename_out=None,
            easting_min=None, easting_max=None,
            northing_min=None, northing_max=None,
            use_cache=False, verbose=False):
    """Read Digital Elevation model from the following NetCDF format (.dem)

    Example:

    ncols         3121
    nrows         1800
    xllcorner     722000
    yllcorner     5893000
    cellsize      25
    NODATA_value  -9999
    138.3698 137.4194 136.5062 135.5558 ..........

    Convert to NetCDF pts format which is

    points:  (Nx2) Float array
    elevation: N Float array
    """

    kwargs = {'basename_out': basename_out,
              'easting_min': easting_min,
              'easting_max': easting_max,
              'northing_min': northing_min,
              'northing_max': northing_max,
              'verbose': verbose}

    if use_cache is True:
        from caching import cache
        result = cache(_dem2pts, basename_in, kwargs,
                       dependencies = [basename_in + '.dem'],
                       verbose = verbose)

    else:
        result = apply(_dem2pts, [basename_in], kwargs)

    return result

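# Example (illustrative, not part of the original source): convert
# 'topography.dem' to 'topography.pts', clipping to a bounding box within
# the extent shown in the docstring header above.
#
#     dem2pts('topography',
#             easting_min=722000, easting_max=725000,
#             northing_min=5893000, northing_max=5895000,
#             use_cache=True, verbose=True)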
1527
1528##
1529# @brief
1530# @param basename_in
1531# @param basename_out
1532# @param verbose
1533# @param easting_min
1534# @param easting_max
1535# @param northing_min
1536# @param northing_max
1537def _dem2pts(basename_in, basename_out=None, verbose=False,
1538            easting_min=None, easting_max=None,
1539            northing_min=None, northing_max=None):
1540    """Read Digitial Elevation model from the following NetCDF format (.dem)
1541
1542    Internal function. See public function dem2pts for details.
1543    """
1544
1545    # FIXME: Can this be written feasibly using write_pts?
1546
1547    import os
1548    from Scientific.IO.NetCDF import NetCDFFile
1549
1550    root = basename_in
1551
1552    # Get NetCDF
1553    infile = NetCDFFile(root + '.dem', netcdf_mode_r) 
1554
1555    if verbose: print 'Reading DEM from %s' %(root + '.dem')
1556
1557    ncols = infile.ncols[0]
1558    nrows = infile.nrows[0]
1559    xllcorner = infile.xllcorner[0]  # Easting of lower left corner
1560    yllcorner = infile.yllcorner[0]  # Northing of lower left corner
1561    cellsize = infile.cellsize[0]
1562    NODATA_value = infile.NODATA_value[0]
1563    dem_elevation = infile.variables['elevation']
1564
1565    zone = infile.zone[0]
1566    false_easting = infile.false_easting[0]
1567    false_northing = infile.false_northing[0]
1568
1569    # Text strings
1570    projection = infile.projection
1571    datum = infile.datum
1572    units = infile.units
1573
1574    # Get output file
    if basename_out is None:
1576        ptsname = root + '.pts'
1577    else:
1578        ptsname = basename_out + '.pts'
1579
1580    if verbose: print 'Store to NetCDF file %s' %ptsname
1581
1582    # NetCDF file definition
1583    outfile = NetCDFFile(ptsname, netcdf_mode_w)
1584
1585    # Create new file
1586    outfile.institution = 'Geoscience Australia'
1587    outfile.description = 'NetCDF pts format for compact and portable ' \
1588                          'storage of spatial point data'
1589
1590    # Assign default values
1591    if easting_min is None: easting_min = xllcorner
1592    if easting_max is None: easting_max = xllcorner + ncols*cellsize
1593    if northing_min is None: northing_min = yllcorner
1594    if northing_max is None: northing_max = yllcorner + nrows*cellsize
1595
1596    # Compute offsets to update georeferencing
1597    easting_offset = xllcorner - easting_min
1598    northing_offset = yllcorner - northing_min
1599
1600    # Georeferencing
1601    outfile.zone = zone
1602    outfile.xllcorner = easting_min # Easting of lower left corner
1603    outfile.yllcorner = northing_min # Northing of lower left corner
1604    outfile.false_easting = false_easting
1605    outfile.false_northing = false_northing
1606
1607    outfile.projection = projection
1608    outfile.datum = datum
1609    outfile.units = units
1610
1611    # Grid info (FIXME: probably not going to be used, but heck)
1612    outfile.ncols = ncols
1613    outfile.nrows = nrows
1614
1615    dem_elevation_r = num.reshape(dem_elevation, (nrows, ncols))
1616    totalnopoints = nrows*ncols
1617
1618    # Calculating number of NODATA_values for each row in clipped region
1619    # FIXME: use array operations to do faster
1620    nn = 0
1621    k = 0
1622    i1_0 = 0
1623    j1_0 = 0
1624    thisj = 0
1625    thisi = 0
1626    for i in range(nrows):
1627        y = (nrows-i-1)*cellsize + yllcorner
1628        for j in range(ncols):
1629            x = j*cellsize + xllcorner
1630            if easting_min <= x <= easting_max \
1631               and northing_min <= y <= northing_max:
1632                thisj = j
1633                thisi = i
1634                if dem_elevation_r[i,j] == NODATA_value:
1635                    nn += 1
1636
1637                if k == 0:
1638                    i1_0 = i
1639                    j1_0 = j
1640
1641                k += 1
1642
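    # A vectorised version of the count above might look like the sketch
    # below (untested assumption, kept as a comment because the loop also
    # records the clipping indices i1_0, j1_0, thisi and thisj):
    #
    #   xs = xllcorner + num.arange(ncols)*cellsize
    #   ys = yllcorner + (nrows - 1 - num.arange(nrows))*cellsize
    #   in_x = (xs >= easting_min) * (xs <= easting_max)
    #   in_y = (ys >= northing_min) * (ys <= northing_max)
    #   inside = num.outerproduct(in_y, in_x)
    #   nn = num.sum(num.sum(inside * (dem_elevation_r == NODATA_value)))
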
1643    index1 = j1_0
1644    index2 = thisj
1645
1646    # Dimension definitions
1647    nrows_in_bounding_box = int(round((northing_max-northing_min)/cellsize))
1648    ncols_in_bounding_box = int(round((easting_max-easting_min)/cellsize))
1649
1650    clippednopoints = (thisi+1-i1_0)*(thisj+1-j1_0)
1651    nopoints = clippednopoints-nn
1652
1653    clipped_dem_elev = dem_elevation_r[i1_0:thisi+1,j1_0:thisj+1]
1654
1655    if verbose:
1656        print 'There are %d values in the elevation' %totalnopoints
1657        print 'There are %d values in the clipped elevation' %clippednopoints
1658        print 'There are %d NODATA_values in the clipped elevation' %nn
1659
1660    outfile.createDimension('number_of_points', nopoints)
1661    outfile.createDimension('number_of_dimensions', 2) #This is 2d data
1662
1663    # Variable definitions
1664    outfile.createVariable('points', num.Float, ('number_of_points',
1665                                                 'number_of_dimensions'))
1666    outfile.createVariable('elevation', num.Float, ('number_of_points',))
1667
1668    # Get handles to the variables
1669    points = outfile.variables['points']
1670    elevation = outfile.variables['elevation']
1671
1672    lenv = index2-index1+1
1673
1674    # Store data
1675    global_index = 0
1676    # for i in range(nrows):
1677    for i in range(i1_0, thisi+1, 1):
1678        if verbose and i % ((nrows+10)/10) == 0:
1679            print 'Processing row %d of %d' % (i, nrows)
1680
1681        lower_index = global_index
1682
1683        v = dem_elevation_r[i,index1:index2+1]
1684        no_NODATA = num.sum(v == NODATA_value)
1685        if no_NODATA > 0:
1686            newcols = lenv - no_NODATA  # ncols_in_bounding_box - no_NODATA
1687        else:
1688            newcols = lenv              # ncols_in_bounding_box
1689
1690        telev = num.zeros(newcols, num.Float)
1691        tpoints = num.zeros((newcols, 2), num.Float)
1692
1693        local_index = 0
1694
1695        y = (nrows-i-1)*cellsize + yllcorner
1696        #for j in range(ncols):
1697        for j in range(j1_0,index2+1,1):
1698            x = j*cellsize + xllcorner
1699            if easting_min <= x <= easting_max \
1700               and northing_min <= y <= northing_max \
               and dem_elevation_r[i,j] != NODATA_value:
1702                tpoints[local_index, :] = [x-easting_min, y-northing_min]
1703                telev[local_index] = dem_elevation_r[i, j]
1704                global_index += 1
1705                local_index += 1
1706
1707        upper_index = global_index
1708
1709        if upper_index == lower_index + newcols:
1710            points[lower_index:upper_index, :] = tpoints
1711            elevation[lower_index:upper_index] = telev
1712
1713    assert global_index == nopoints, 'index not equal to number of points'
1714
1715    infile.close()
1716    outfile.close()
1717
1718
1719##
1720# @brief  Return block of surface lines for each cross section
# @param lines Iterable of text lines to process.
# @note Generator used by hecras_cross_sections2pts() below.
1723def _read_hecras_cross_sections(lines):
1724    """Return block of surface lines for each cross section
1725    Starts with SURFACE LINE,
1726    Ends with END CROSS-SECTION
1727    """
1728
1729    points = []
1730
1731    reading_surface = False
    for line in lines:
        if len(line.strip()) == 0:    # Ignore blanks
            continue

        if line.strip().startswith('SURFACE LINE'):
            reading_surface = True
            continue

        if line.strip().startswith('END') and reading_surface:
1741            yield points
1742            reading_surface = False
1743            points = []
1744
1745        if reading_surface:
1746            fields = line.strip().split(',')
1747            easting = float(fields[0])
1748            northing = float(fields[1])
1749            elevation = float(fields[2])
1750            points.append([easting, northing, elevation])
1751
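# Example (illustrative): the generator yields one list of
# [easting, northing, elevation] triplets per SURFACE LINE block:
#
#     for section in _read_hecras_cross_sections(lines):
#         for easting, northing, elevation in section:
#             ...
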
1752
1753##
1754# @brief Convert HECRAS (.sdf) file to PTS file.
# @param basename_in Stem of input filename.
# @param basename_out Stem of output filename.
1757# @param verbose True if this function is to be verbose.
1758def hecras_cross_sections2pts(basename_in,
1759                              basename_out=None,
1760                              verbose=False):
    """Read HEC-RAS elevation data from the following ASCII format (.sdf)
1762
1763    Example:
1764
1765# RAS export file created on Mon 15Aug2005 11:42
1766# by HEC-RAS Version 3.1.1
1767
1768BEGIN HEADER:
1769  UNITS: METRIC
1770  DTM TYPE: TIN
1771  DTM: v:\1\cit\perth_topo\river_tin
1772  STREAM LAYER: c:\local\hecras\21_02_03\up_canning_cent3d.shp
1773  CROSS-SECTION LAYER: c:\local\hecras\21_02_03\up_can_xs3d.shp
1774  MAP PROJECTION: UTM
1775  PROJECTION ZONE: 50
1776  DATUM: AGD66
1777  VERTICAL DATUM:
1778  NUMBER OF REACHES:  19
1779  NUMBER OF CROSS-SECTIONS:  14206
1780END HEADER:
1781
1782Only the SURFACE LINE data of the following form will be utilised
1783  CROSS-SECTION:
1784    STREAM ID:Southern-Wungong
1785    REACH ID:Southern-Wungong
1786    STATION:19040.*
1787    CUT LINE:
1788      405548.671603161 , 6438142.7594925
1789      405734.536092045 , 6438326.10404912
1790      405745.130459356 , 6438331.48627354
1791      405813.89633823 , 6438368.6272789
1792    SURFACE LINE:
1793     405548.67,   6438142.76,   35.37
1794     405552.24,   6438146.28,   35.41
1795     405554.78,   6438148.78,   35.44
1796     405555.80,   6438149.79,   35.44
1797     405559.37,   6438153.31,   35.45
1798     405560.88,   6438154.81,   35.44
1799     405562.93,   6438156.83,   35.42
1800     405566.50,   6438160.35,   35.38
1801     405566.99,   6438160.83,   35.37
1802     ...
1803   END CROSS-SECTION
1804
1805    Convert to NetCDF pts format which is
1806
1807    points:  (Nx2) Float array
1808    elevation: N Float array
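
    Example use (filename is illustrative only):

        hecras_cross_sections2pts('river_xsections', verbose=True)
        # reads river_xsections.sdf, writes river_xsections.pts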
1809    """
1810
1811    import os
1812    from Scientific.IO.NetCDF import NetCDFFile
1813
1814    root = basename_in
1815
1816    # Get ASCII file
1817    infile = open(root + '.sdf', 'r')
1818
1819    if verbose: print 'Reading DEM from %s' %(root + '.sdf')
1820
1821    lines = infile.readlines()
1822    infile.close()
1823
1824    if verbose: print 'Converting to pts format'
1825
1826    # Scan through the header, picking up stuff we need.
1827    i = 0
1828    while lines[i].strip() == '' or lines[i].strip().startswith('#'):
1829        i += 1
1830
1831    assert lines[i].strip().upper() == 'BEGIN HEADER:'
1832    i += 1
1833
1834    assert lines[i].strip().upper().startswith('UNITS:')
1835    units = lines[i].strip().split()[1]
1836    i += 1
1837
1838    assert lines[i].strip().upper().startswith('DTM TYPE:')
1839    i += 1
1840
1841    assert lines[i].strip().upper().startswith('DTM:')
1842    i += 1
1843
1844    assert lines[i].strip().upper().startswith('STREAM')
1845    i += 1
1846
1847    assert lines[i].strip().upper().startswith('CROSS')
1848    i += 1
1849
1850    assert lines[i].strip().upper().startswith('MAP PROJECTION:')
1851    projection = lines[i].strip().split(':')[1]
1852    i += 1
1853
1854    assert lines[i].strip().upper().startswith('PROJECTION ZONE:')
1855    zone = int(lines[i].strip().split(':')[1])
1856    i += 1
1857
1858    assert lines[i].strip().upper().startswith('DATUM:')
1859    datum = lines[i].strip().split(':')[1]
1860    i += 1
1861
1862    assert lines[i].strip().upper().startswith('VERTICAL DATUM:')
1863    i += 1
1864
1865    assert lines[i].strip().upper().startswith('NUMBER OF REACHES:')
1866    i += 1
1867
1868    assert lines[i].strip().upper().startswith('NUMBER OF CROSS-SECTIONS:')
1869    number_of_cross_sections = int(lines[i].strip().split(':')[1])
1870    i += 1
1871
1872    # Now read all points
1873    points = []
1874    elevation = []
1875    for j, entries in enumerate(_read_hecras_cross_sections(lines[i:])):
1876        for k, entry in enumerate(entries):
1877            points.append(entry[:2])
1878            elevation.append(entry[2])
1879
    msg = 'Actual number of cross sections %d does not match reported %d' \
          % (j+1, number_of_cross_sections)
1882    assert j+1 == number_of_cross_sections, msg
1883
1884    # Get output file, write PTS data
    if basename_out is None:
1886        ptsname = root + '.pts'
1887    else:
1888        ptsname = basename_out + '.pts'
1889
1890    geo_ref = Geo_reference(zone, 0, 0, datum, projection, units)
1891    geo = Geospatial_data(points, {"elevation":elevation},
1892                          verbose=verbose, geo_reference=geo_ref)
1893    geo.export_points_file(ptsname)
1894
1895
1896##
1897# @brief
1898# @param basename_in
1899# @param extra_name_out
1900# @param quantities
1901# @param timestep
1902# @param reduction
1903# @param cellsize
1904# @param number_of_decimal_places
1905# @param NODATA_value
1906# @param easting_min
1907# @param easting_max
1908# @param northing_min
1909# @param northing_max
1910# @param verbose
1911# @param origin
1912# @param datum
1913# @param format
1914# @return
1915def export_grid(basename_in, extra_name_out=None,
1916                quantities=None, # defaults to elevation
1917                timestep=None,
1918                reduction=None,
1919                cellsize=10,
1920                number_of_decimal_places=None,
1921                NODATA_value=-9999,
1922                easting_min=None,
1923                easting_max=None,
1924                northing_min=None,
1925                northing_max=None,
1926                verbose=False,
1927                origin=None,
1928                datum='WGS84',
1929                format='ers'):
1930    """Wrapper for sww2dem.
1931    See sww2dem to find out what most of the parameters do.
1932
1933    Quantities is a list of quantities.  Each quantity will be
1934    calculated for each sww file.
1935
    Returns the basenames of the files produced, each made up of the
    directory and the filename without the extension.

    One output file is produced for each input sww file and each quantity.
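
    Example use (names are illustrative only):

        files = export_grid('foo', extra_name_out='run1',
                            quantities=['elevation', 'stage'],
                            cellsize=25, format='asc')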
1942    """
1943
1944    if quantities is None:
1945        quantities = ['elevation']
1946
    if type(quantities) is str:
        quantities = [quantities]
1949
1950    # How many sww files are there?
1951    dir, base = os.path.split(basename_in)
1952
1953    iterate_over = get_all_swwfiles(dir, base, verbose)
1954
1955    if dir == "":
1956        dir = "." # Unix compatibility
1957
1958    files_out = []
1959    for sww_file in iterate_over:
1960        for quantity in quantities:
1961            if extra_name_out is None:
1962                basename_out = sww_file + '_' + quantity
1963            else:
1964                basename_out = sww_file + '_' + quantity + '_' + extra_name_out
1965
1966            file_out = sww2dem(dir+sep+sww_file, dir+sep+basename_out,
1967                               quantity,
1968                               timestep,
1969                               reduction,
1970                               cellsize,
1971                               number_of_decimal_places,
1972                               NODATA_value,
1973                               easting_min,
1974                               easting_max,
1975                               northing_min,
1976                               northing_max,
1977                               verbose,
1978                               origin,
1979                               datum,
1980                               format)
1981            files_out.append(file_out)
1982    return files_out
1983
1984
1985##
1986# @brief
1987# @param production_dirs
1988# @param output_dir
1989# @param scenario_name
1990# @param gauges_dir_name
1991# @param plot_quantity
1992# @param generate_fig
1993# @param reportname
1994# @param surface
1995# @param time_min
1996# @param time_max
1997# @param title_on
1998# @param verbose
1999# @param nodes
2000def get_timeseries(production_dirs, output_dir, scenario_name, gauges_dir_name,
2001                   plot_quantity, generate_fig=False,
2002                   reportname=None, surface=False, time_min=None,
2003                   time_max=None, title_on=False, verbose=True,
2004                   nodes=None):
2005    """
2006    nodes - number of processes used.
2007
2008    warning - this function has no tests
2009    """
2010
    if reportname is None:
        report = False
    else:
        report = True
2015
2016    if nodes is None:
2017        is_parallel = False
2018    else:
2019        is_parallel = True
2020
    # Generate figures
    swwfiles = {}
    if is_parallel is True:
        base_reportname = reportname
        for i in range(nodes):
            print 'Sending node %d of %d' % (i, nodes)
            swwfiles = {}
            if base_reportname is not None:
                reportname = base_reportname + '_%s' % i
2029            for label_id in production_dirs.keys():
2030                if label_id == 'boundaries':
2031                    swwfile = best_boundary_sww
2032                else:
2033                    file_loc = output_dir + label_id + sep
2034                    sww_extra = '_P%s_%s' % (i, nodes)
2035                    swwfile = file_loc + scenario_name + sww_extra + '.sww'
2036                    print 'swwfile', swwfile
2037                    swwfiles[swwfile] = label_id
2038
            texname, elev_output = sww2timeseries(swwfiles,
                                                  gauges_dir_name,
                                                  production_dirs,
                                                  report=report,
                                                  reportname=reportname,
                                                  plot_quantity=plot_quantity,
                                                  generate_fig=generate_fig,
                                                  surface=surface,
                                                  time_min=time_min,
                                                  time_max=time_max,
                                                  title_on=title_on,
                                                  verbose=verbose)
2051    else:
2052        for label_id in production_dirs.keys():
2053            if label_id == 'boundaries':
2054                print 'boundaries'
2055                file_loc = project.boundaries_in_dir
2056                swwfile = project.boundaries_dir_name3 + '.sww'
2057                #  swwfile = boundary_dir_filename
2058            else:
2059                file_loc = output_dir + label_id + sep
2060                swwfile = file_loc + scenario_name + '.sww'
2061            swwfiles[swwfile] = label_id
2062
2063        texname, elev_output = sww2timeseries(swwfiles,
2064                                              gauges_dir_name,
2065                                              production_dirs,
2066                                              report=report,
2067                                              reportname=reportname,
2068                                              plot_quantity=plot_quantity,
2069                                              generate_fig=generate_fig,
2070                                              surface=surface,
2071                                              time_min=time_min,
2072                                              time_max=time_max,
2073                                              title_on=title_on,
2074                                              verbose=verbose)
2075
2076
2077##
2078# @brief Convert SWW file to DEM file (.asc or .ers).
2079# @param basename_in
2080# @param basename_out
2081# @param quantity
2082# @param timestep
2083# @param reduction
2084# @param cellsize 
2085# @param number_of_decimal_places
2086# @param NODATA_value
2087# @param easting_min
2088# @param easting_max
2089# @param northing_min
2090# @param northing_max
2091# @param verbose
2092# @param origin
2093# @param datum
# @param format
# @param block_size
# @return
2096def sww2dem(basename_in, basename_out=None,
2097            quantity=None, # defaults to elevation
2098            timestep=None,
2099            reduction=None,
2100            cellsize=10,
2101            number_of_decimal_places=None,
2102            NODATA_value=-9999,
2103            easting_min=None,
2104            easting_max=None,
2105            northing_min=None,
2106            northing_max=None,
2107            verbose=False,
2108            origin=None,
2109            datum='WGS84',
2110            format='ers',
2111            block_size=None):
    """Read SWW file and convert to Digital Elevation Model format
2113    (.asc or .ers)
2114
2115    Example (ASC):
2116    ncols         3121
2117    nrows         1800
2118    xllcorner     722000
2119    yllcorner     5893000
2120    cellsize      25
2121    NODATA_value  -9999
2122    138.3698 137.4194 136.5062 135.5558 ..........
2123
    The number of decimal places written can be limited via the
    number_of_decimal_places argument to reduce disk space requirements.

    For the 'asc' format an accompanying file with the same basename_out but
    extension .prj is also written; it fixes the UTM zone, datum, false
    northings and eastings.
2129
2130    The prj format is assumed to be as
2131
2132    Projection    UTM
2133    Zone          56
2134    Datum         WGS84
2135    Zunits        NO
2136    Units         METERS
2137    Spheroid      WGS84
2138    Xshift        0.0000000000
2139    Yshift        10000000.0000000000
2140    Parameters
2141
2142
2143    The parameter quantity must be the name of an existing quantity or
2144    an expression involving existing quantities. The default is
2145    'elevation'. Quantity is not a list of quantities.
2146
2147    if timestep (an index) is given, output quantity at that timestep
2148
2149    if reduction is given use that to reduce quantity over all timesteps.
2150
    datum sets the datum used in the output georeferencing (default 'WGS84')
2152
2153    format can be either 'asc' or 'ers'
2154
2155    block_size - sets the number of slices along the non-time axis to
2156                 process in one block.
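
    Example use (names are illustrative only):

        # Maximum water depth over all timesteps, written to runup_depth.ers
        sww2dem('runup', basename_out='runup_depth',
                quantity='stage - elevation',
                reduction=max, cellsize=5, verbose=True)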
2157    """
2158
2159    import sys
2160
2161    from anuga.utilities.polygon import inside_polygon, outside_polygon, \
2162         separate_points_by_polygon
2163    from anuga.abstract_2d_finite_volumes.util import \
2164         apply_expression_to_dictionary
2165
2166    msg = 'Format must be either asc or ers'
2167    assert format.lower() in ['asc', 'ers'], msg
2168
2169    false_easting = 500000
2170    false_northing = 10000000
2171
2172    if quantity is None:
2173        quantity = 'elevation'
2174
2175    if reduction is None:
2176        reduction = max
2177
2178    if basename_out is None:
2179        basename_out = basename_in + '_%s' % quantity
2180
2181    if quantity_formula.has_key(quantity):
2182        quantity = quantity_formula[quantity]
2183
2184    if number_of_decimal_places is None:
2185        number_of_decimal_places = 3
2186
2187    if block_size is None:
2188        block_size = DEFAULT_BLOCK_SIZE
2189
2190    # Read SWW file
2191    swwfile = basename_in + '.sww'
2192    demfile = basename_out + '.' + format
2193
2194    # Read sww file
2195    if verbose:
2196        print 'Reading from %s' % swwfile
        print 'Output file basename is %s' % basename_out
2198
2199    from Scientific.IO.NetCDF import NetCDFFile
2200    fid = NetCDFFile(swwfile)
2201
2202    #Get extent and reference
2203    x = fid.variables['x'][:]
2204    y = fid.variables['y'][:]
2205    volumes = fid.variables['volumes'][:]
2206    if timestep is not None:
2207        times = fid.variables['time'][timestep]
2208    else:
2209        times = fid.variables['time'][:]
2210
2211    number_of_timesteps = fid.dimensions['number_of_timesteps']
2212    number_of_points = fid.dimensions['number_of_points']
2213
2214    if origin is None:
2215        # Get geo_reference
2216        # sww files don't have to have a geo_ref
2217        try:
2218            geo_reference = Geo_reference(NetCDFObject=fid)
2219        except AttributeError, e:
2220            geo_reference = Geo_reference() # Default georef object
2221
2222        xllcorner = geo_reference.get_xllcorner()
2223        yllcorner = geo_reference.get_yllcorner()
2224        zone = geo_reference.get_zone()
2225    else:
2226        zone = origin[0]
2227        xllcorner = origin[1]
2228        yllcorner = origin[2]
2229
2230    # FIXME: Refactor using code from Interpolation_function.statistics
2231    # (in interpolate.py)
2232    # Something like print swwstats(swwname)
2233    if verbose:
2234        print '------------------------------------------------'
2235        print 'Statistics of SWW file:'
2236        print '  Name: %s' %swwfile
2237        print '  Reference:'
2238        print '    Lower left corner: [%f, %f]'\
2239              %(xllcorner, yllcorner)
2240        if timestep is not None:
2241            print '    Time: %f' %(times)
2242        else:
2243            print '    Start time: %f' %fid.starttime[0]
2244        print '  Extent:'
2245        print '    x [m] in [%f, %f], len(x) == %d'\
2246              %(min(x.flat), max(x.flat), len(x.flat))
2247        print '    y [m] in [%f, %f], len(y) == %d'\
2248              %(min(y.flat), max(y.flat), len(y.flat))
2249        if timestep is not None:
2250            print '    t [s] = %f, len(t) == %d' %(times, 1)
2251        else:
2252            print '    t [s] in [%f, %f], len(t) == %d'\
2253              %(min(times), max(times), len(times))
2254        print '  Quantities [SI units]:'
2255        # Comment out for reduced memory consumption
2256        for name in ['stage', 'xmomentum', 'ymomentum']:
2257            q = fid.variables[name][:].flat
2258            if timestep is not None:
2259                q = q[timestep*len(x):(timestep+1)*len(x)]
2260            if verbose: print '    %s in [%f, %f]' %(name, min(q), max(q))
2261        for name in ['elevation']:
2262            q = fid.variables[name][:].flat
2263            if verbose: print '    %s in [%f, %f]' %(name, min(q), max(q))
2264
2265    # Get the variables in the supplied expression.
2266    # This may throw a SyntaxError exception.
2267    var_list = get_vars_in_expression(quantity)
2268
2269    # Check that we have the required variables in the SWW file.
2270    missing_vars = []
2271    for name in var_list:
2272        try:
2273            _ = fid.variables[name]
2274        except:
2275            missing_vars.append(name)
2276    if missing_vars:
2277        msg = ("In expression '%s', variables %s are not in the SWW file '%s'"
               % (quantity, missing_vars, swwfile))
2279        raise Exception, msg
2280
2281    # Create result array and start filling, block by block.
2282    result = num.zeros(number_of_points, num.Float)
2283
2284    for start_slice in xrange(0, number_of_points, block_size):
2285        # limit slice size to array end if at last block
2286        end_slice = min(start_slice + block_size, number_of_points)
2287       
2288        # get slices of all required variables
2289        q_dict = {}
2290        for name in var_list:
2291            # check if variable has time axis
2292            if len(fid.variables[name].shape) == 2:
2293                q_dict[name] = fid.variables[name][:,start_slice:end_slice]
2294            else:       # no time axis
2295                q_dict[name] = fid.variables[name][start_slice:end_slice]
2296
2297        # Evaluate expression with quantities found in SWW file
2298        res = apply_expression_to_dictionary(quantity, q_dict)
2299
2300        if len(res.shape) == 2:
2301            new_res = num.zeros(res.shape[1], num.Float)
2302            for k in xrange(res.shape[1]):
2303                new_res[k] = reduction(res[:,k])
2304            res = new_res
2305
2306        result[start_slice:end_slice] = res
2307                                   
    # Post condition: Now result has dimension: number_of_points
2309    assert len(result.shape) == 1
2310    assert result.shape[0] == number_of_points
2311
2312    if verbose:
2313        print 'Processed values for %s are in [%f, %f]' % \
2314              (quantity, min(result), max(result))
2315
2316    #Create grid and update xll/yll corner and x,y
2317    #Relative extent
2318    if easting_min is None:
2319        xmin = min(x)
2320    else:
2321        xmin = easting_min - xllcorner
2322
2323    if easting_max is None:
2324        xmax = max(x)
2325    else:
2326        xmax = easting_max - xllcorner
2327
2328    if northing_min is None:
2329        ymin = min(y)
2330    else:
2331        ymin = northing_min - yllcorner
2332
2333    if northing_max is None:
2334        ymax = max(y)
2335    else:
2336        ymax = northing_max - yllcorner
2337
2338    msg = 'xmax must be greater than or equal to xmin.\n'
2339    msg += 'I got xmin = %f, xmax = %f' %(xmin, xmax)
2340    assert xmax >= xmin, msg
2341
    msg = 'ymax must be greater than or equal to ymin.\n'
2343    msg += 'I got ymin = %f, ymax = %f' %(ymin, ymax)
2344    assert ymax >= ymin, msg
2345
2346    if verbose: print 'Creating grid'
2347    ncols = int((xmax-xmin)/cellsize) + 1
2348    nrows = int((ymax-ymin)/cellsize) + 1
2349
2350    #New absolute reference and coordinates
2351    newxllcorner = xmin + xllcorner
2352    newyllcorner = ymin + yllcorner
2353
2354    x = x + xllcorner - newxllcorner
2355    y = y + yllcorner - newyllcorner
2356
2357    vertex_points = num.concatenate ((x[:,num.NewAxis], y[:,num.NewAxis]), axis=1)
2358    assert len(vertex_points.shape) == 2
2359
2360    grid_points = num.zeros ((ncols*nrows, 2), num.Float)
2361
2362    for i in xrange(nrows):
2363        if format.lower() == 'asc':
2364            yg = i * cellsize
        else:
            # ers format: flip the order of the y values
            yg = (nrows-i) * cellsize
2368
2369        for j in xrange(ncols):
2370            xg = j * cellsize
2371            k = i*ncols + j
2372
2373            grid_points[k, 0] = xg
2374            grid_points[k, 1] = yg
2375
2376    #Interpolate
2377    from anuga.fit_interpolate.interpolate import Interpolate
2378
2379    # Remove loners from vertex_points, volumes here
2380    vertex_points, volumes = remove_lone_verts(vertex_points, volumes)
2381    #export_mesh_file('monkey.tsh',{'vertices':vertex_points, 'triangles':volumes})
2382    #import sys; sys.exit()
2383    interp = Interpolate(vertex_points, volumes, verbose = verbose)
2384
2385    #Interpolate using quantity values
2386    if verbose: print 'Interpolating'
2387    grid_values = interp.interpolate(result, grid_points).flat
2388
2389    if verbose:
2390        print 'Interpolated values are in [%f, %f]' %(min(grid_values),
2391                                                      max(grid_values))
2392
2393    #Assign NODATA_value to all points outside bounding polygon (from interpolation mesh)
2394    P = interp.mesh.get_boundary_polygon()
2395    outside_indices = outside_polygon(grid_points, P, closed=True)
2396
2397    for i in outside_indices:
2398        grid_values[i] = NODATA_value
2399
2400    if format.lower() == 'ers':
2401        # setup ERS header information
2402        grid_values = num.reshape(grid_values, (nrows, ncols))
2403        header = {}
2404        header['datum'] = '"' + datum + '"'
2405        # FIXME The use of hardwired UTM and zone number needs to be made optional
2406        # FIXME Also need an automatic test for coordinate type (i.e. EN or LL)
2407        header['projection'] = '"UTM-' + str(zone) + '"'
2408        header['coordinatetype'] = 'EN'
2409        if header['coordinatetype'] == 'LL':
2410            header['longitude'] = str(newxllcorner)
2411            header['latitude'] = str(newyllcorner)
2412        elif header['coordinatetype'] == 'EN':
2413            header['eastings'] = str(newxllcorner)
2414            header['northings'] = str(newyllcorner)
2415        header['nullcellvalue'] = str(NODATA_value)
2416        header['xdimension'] = str(cellsize)
2417        header['ydimension'] = str(cellsize)
2418        header['value'] = '"' + quantity + '"'
2419        #header['celltype'] = 'IEEE8ByteReal'  #FIXME: Breaks unit test
2420
2421        #Write
2422        if verbose: print 'Writing %s' %demfile
2423
2424        import ermapper_grids
2425
2426        ermapper_grids.write_ermapper_grid(demfile, grid_values, header)
2427
2428        fid.close()
2429    else:
2430        #Write to Ascii format
2431        #Write prj file
2432        prjfile = basename_out + '.prj'
2433
2434        if verbose: print 'Writing %s' %prjfile
2435        prjid = open(prjfile, 'w')
2436        prjid.write('Projection    %s\n' %'UTM')
2437        prjid.write('Zone          %d\n' %zone)
2438        prjid.write('Datum         %s\n' %datum)
2439        prjid.write('Zunits        NO\n')
2440        prjid.write('Units         METERS\n')
2441        prjid.write('Spheroid      %s\n' %datum)
2442        prjid.write('Xshift        %d\n' %false_easting)
2443        prjid.write('Yshift        %d\n' %false_northing)
2444        prjid.write('Parameters\n')
2445        prjid.close()
2446
2447        if verbose: print 'Writing %s' %demfile
2448
2449        ascid = open(demfile, 'w')
2450
2451        ascid.write('ncols         %d\n' %ncols)
2452        ascid.write('nrows         %d\n' %nrows)
2453        ascid.write('xllcorner     %d\n' %newxllcorner)
2454        ascid.write('yllcorner     %d\n' %newyllcorner)
2455        ascid.write('cellsize      %f\n' %cellsize)
2456        ascid.write('NODATA_value  %d\n' %NODATA_value)
2457
2458        #Get bounding polygon from mesh
2459        #P = interp.mesh.get_boundary_polygon()
2460        #inside_indices = inside_polygon(grid_points, P)
2461        for i in range(nrows):
2462            if verbose and i % ((nrows+10)/10) == 0:
2463                print 'Doing row %d of %d' %(i, nrows)
2464
2465            base_index = (nrows-i-1)*ncols
2466
2467            slice = grid_values[base_index:base_index+ncols]
2468            #s = array2string(slice, max_line_width=sys.maxint)
2469            s = num.array2string(slice, max_line_width=sys.maxint,
2470                                 precision=number_of_decimal_places)
2471            ascid.write(s[1:-1] + '\n')
2472
2473        #Close
2474        ascid.close()
2475        fid.close()
2476
2477        return basename_out
2478
2479################################################################################
# Obsolete functions - maintained for backwards compatibility
2481################################################################################
2482
2483##
2484# @brief
2485# @param basename_in
2486# @param basename_out
2487# @param quantity
2488# @param timestep
2489# @param reduction
2490# @param cellsize
# @param number_of_decimal_places
# @param verbose
2492# @param origin
2493# @note OBSOLETE - use sww2dem() instead.
def sww2asc(basename_in, basename_out = None,
            quantity = None,
            timestep = None,
            reduction = None,
            cellsize = 10,
            number_of_decimal_places = None,
            verbose = False,
            origin = None):
    print 'sww2asc will soon be obsoleted - please use sww2dem'
    sww2dem(basename_in,
            basename_out = basename_out,
            quantity = quantity,
            timestep = timestep,
            reduction = reduction,
            cellsize = cellsize,
            number_of_decimal_places = number_of_decimal_places,
            verbose = verbose,
            origin = origin,
            datum = 'WGS84',
            format = 'asc')
2513
2514
2515##
2516# @brief
2517# @param basename_in
2518# @param basename_out
2519# @param quantity
2520# @param timestep
2521# @param reduction
2522# @param cellsize
# @param number_of_decimal_places
# @param verbose
2524# @param origin
2525# @param datum
2526# @note OBSOLETE - use sww2dem() instead.
def sww2ers(basename_in, basename_out=None,
            quantity=None,
            timestep=None,
            reduction=None,
            cellsize=10,
            number_of_decimal_places=None,
            verbose=False,
            origin=None,
            datum='WGS84'):
2535    print 'sww2ers will soon be obsoleted - please use sww2dem'
2536    sww2dem(basename_in,
2537            basename_out=basename_out,
2538            quantity=quantity,
2539            timestep=timestep,
2540            reduction=reduction,
2541            cellsize=cellsize,
2542            number_of_decimal_places=number_of_decimal_places,
2543            verbose=verbose,
2544            origin=origin,
2545            datum=datum,
2546            format='ers')
2547
2548################################################################################
2549# End of obsolete functions
2550################################################################################
2551
2552
2553##
2554# @brief Convert SWW file to PTS file (at selected points).
2555# @param basename_in Stem name of input SWW file.
2556# @param basename_out Stem name of output file.
2557# @param data_points If given, points where quantity is to be computed.
2558# @param quantity Name (or expression) of existing quantity(s) (def: elevation).
2559# @param timestep If given, output quantity at that timestep.
2560# @param reduction If given, reduce quantity by this factor.
2561# @param NODATA_value The NODATA value (default -9999).
2562# @param verbose True if this function is to be verbose.
2563# @param origin ??
2564def sww2pts(basename_in, basename_out=None,
2565            data_points=None,
2566            quantity=None,
2567            timestep=None,
2568            reduction=None,
2569            NODATA_value=-9999,
2570            verbose=False,
2571            origin=None):
2572    """Read SWW file and convert to interpolated values at selected points
2573
2574    The parameter 'quantity' must be the name of an existing quantity or
2575    an expression involving existing quantities. The default is 'elevation'.
2576
2577    if timestep (an index) is given, output quantity at that timestep.
2578
2579    if reduction is given use that to reduce quantity over all timesteps.
2580
2581    data_points (Nx2 array) give locations of points where quantity is to
2582    be computed.
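
    Example use (points are illustrative only):

        sww2pts('runup', data_points=[[10.0, 20.0], [30.0, 40.0]],
                quantity='elevation', verbose=True)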
2583    """
2584
2585    import sys
2586    from anuga.utilities.polygon import inside_polygon, outside_polygon, \
2587             separate_points_by_polygon
2588    from anuga.abstract_2d_finite_volumes.util import \
2589             apply_expression_to_dictionary
2590    from anuga.geospatial_data.geospatial_data import Geospatial_data
2591
2592    if quantity is None:
2593        quantity = 'elevation'
2594
2595    if reduction is None:
2596        reduction = max
2597
2598    if basename_out is None:
2599        basename_out = basename_in + '_%s' % quantity
2600
2601    swwfile = basename_in + '.sww'
2602    ptsfile = basename_out + '.pts'
2603
2604    # Read sww file
2605    if verbose: print 'Reading from %s' % swwfile
2606    from Scientific.IO.NetCDF import NetCDFFile
2607    fid = NetCDFFile(swwfile)
2608
2609    # Get extent and reference
2610    x = fid.variables['x'][:]
2611    y = fid.variables['y'][:]
2612    volumes = fid.variables['volumes'][:]
2613
2614    number_of_timesteps = fid.dimensions['number_of_timesteps']
2615    number_of_points = fid.dimensions['number_of_points']
2616    if origin is None:
2617        # Get geo_reference
2618        # sww files don't have to have a geo_ref
2619        try:
2620            geo_reference = Geo_reference(NetCDFObject=fid)
2621        except AttributeError, e:
2622            geo_reference = Geo_reference() # Default georef object
2623
2624        xllcorner = geo_reference.get_xllcorner()
2625        yllcorner = geo_reference.get_yllcorner()
2626        zone = geo_reference.get_zone()
2627    else:
2628        zone = origin[0]
2629        xllcorner = origin[1]
2630        yllcorner = origin[2]
2631
2632    # FIXME: Refactor using code from file_function.statistics
2633    # Something like print swwstats(swwname)
2634    if verbose:
2635        x = fid.variables['x'][:]
2636        y = fid.variables['y'][:]
2637        times = fid.variables['time'][:]
2638        print '------------------------------------------------'
2639        print 'Statistics of SWW file:'
2640        print '  Name: %s' % swwfile
2641        print '  Reference:'
2642        print '    Lower left corner: [%f, %f]' % (xllcorner, yllcorner)
2643        print '    Start time: %f' % fid.starttime[0]
2644        print '  Extent:'
2645        print '    x [m] in [%f, %f], len(x) == %d' \
2646              % (min(x.flat), max(x.flat), len(x.flat))
2647        print '    y [m] in [%f, %f], len(y) == %d' \
2648              % (min(y.flat), max(y.flat), len(y.flat))
2649        print '    t [s] in [%f, %f], len(t) == %d' \
2650              % (min(times), max(times), len(times))
2651        print '  Quantities [SI units]:'
2652        for name in ['stage', 'xmomentum', 'ymomentum', 'elevation']:
2653            q = fid.variables[name][:].flat
2654            print '    %s in [%f, %f]' % (name, min(q), max(q))
2655
2656    # Get quantity and reduce if applicable
2657    if verbose: print 'Processing quantity %s' % quantity
2658
2659    # Turn NetCDF objects into Numeric arrays
2660    quantity_dict = {}
2661    for name in fid.variables.keys():
2662        quantity_dict[name] = fid.variables[name][:]
2663
2664    # Convert quantity expression to quantities found in sww file
2665    q = apply_expression_to_dictionary(quantity, quantity_dict)
2666
2667    if len(q.shape) == 2:
2668        # q has a time component and needs to be reduced along
2669        # the temporal dimension
2670        if verbose: print 'Reducing quantity %s' % quantity
2671
2672        q_reduced = num.zeros(number_of_points, num.Float)
2673        for k in range(number_of_points):
2674            q_reduced[k] = reduction(q[:,k])
2675        q = q_reduced
2676
2677    # Post condition: Now q has dimension: number_of_points
2678    assert len(q.shape) == 1
2679    assert q.shape[0] == number_of_points
2680
2681    if verbose:
2682        print 'Processed values for %s are in [%f, %f]' \
2683              % (quantity, min(q), max(q))
2684
2685    # Create grid and update xll/yll corner and x,y
2686    vertex_points = num.concatenate((x[:, num.NewAxis], y[:, num.NewAxis]), axis=1)
2687    assert len(vertex_points.shape) == 2
2688
2689    # Interpolate
2690    from anuga.fit_interpolate.interpolate import Interpolate
2691    interp = Interpolate(vertex_points, volumes, verbose=verbose)
2692
2693    # Interpolate using quantity values
2694    if verbose: print 'Interpolating'
2695    interpolated_values = interp.interpolate(q, data_points).flat
2696
2697    if verbose:
2698        print 'Interpolated values are in [%f, %f]' % (min(interpolated_values),
2699                                                       max(interpolated_values))
2700
2701    # Assign NODATA_value to all points outside bounding polygon
2702    # (from interpolation mesh)
2703    P = interp.mesh.get_boundary_polygon()
2704    outside_indices = outside_polygon(data_points, P, closed=True)
2705
2706    for i in outside_indices:
2707        interpolated_values[i] = NODATA_value
2708
2709    # Store results
2710    G = Geospatial_data(data_points=data_points, attributes=interpolated_values)
2711
2712    G.export_points_file(ptsfile, absolute = True)
2713
2714    fid.close()
2715
2716
2717##
2718# @brief Convert ASC file to DEM file.
2719# @param basename_in Stem of input filename.
2720# @param basename_out Stem of output filename.
# @param use_cache If True, cache the result using the caching module.
2722# @param verbose True if this function is to be verbose.
2723# @return
2724# @note A PRJ file with same stem basename must exist and is used to fix the
2725#       UTM zone, datum, false northings and eastings.
2726def convert_dem_from_ascii2netcdf(basename_in, basename_out=None,
2727                                  use_cache=False,
2728                                  verbose=False):
2729    """Read Digital Elevation model from the following ASCII format (.asc)
2730
2731    Example:
2732    ncols         3121
2733    nrows         1800
2734    xllcorner     722000
2735    yllcorner     5893000
2736    cellsize      25
2737    NODATA_value  -9999
2738    138.3698 137.4194 136.5062 135.5558 ..........
2739
2740    Convert basename_in + '.asc' to NetCDF format (.dem)
2741    mimicking the ASCII format closely.
2742
2743    An accompanying file with same basename_in but extension .prj must exist
2744    and is used to fix the UTM zone, datum, false northings and eastings.
2745
2746    The prj format is assumed to be as
2747
2748    Projection    UTM
2749    Zone          56
2750    Datum         WGS84
2751    Zunits        NO
2752    Units         METERS
2753    Spheroid      WGS84
2754    Xshift        0.0000000000
2755    Yshift        10000000.0000000000
2756    Parameters
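
    Example use (filename is illustrative only):

        convert_dem_from_ascii2netcdf('topo', use_cache=True, verbose=True)
        # reads topo.asc and topo.prj, writes topo.dem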
2757    """
2758
2759    kwargs = {'basename_out': basename_out, 'verbose': verbose}
2760
2761    if use_cache is True:
2762        from caching import cache
2763        result = cache(_convert_dem_from_ascii2netcdf, basename_in, kwargs,
2764                       dependencies=[basename_in + '.asc',
2765                                     basename_in + '.prj'],
2766                       verbose=verbose)
2767
2768    else:
2769        result = apply(_convert_dem_from_ascii2netcdf, [basename_in], kwargs)
2770
2771    return result
2772
2773
2774##
2775# @brief Convert an ASC file to a DEM file.
2776# @param basename_in Stem of input filename.
2777# @param basename_out Stem of output filename.
2778# @param verbose True if this function is to be verbose.
2779def _convert_dem_from_ascii2netcdf(basename_in, basename_out = None,
2780                                   verbose = False):
2781    """Read Digital Elevation model from the following ASCII format (.asc)
2782
2783    Internal function. See public function convert_dem_from_ascii2netcdf
2784    for details.
2785    """
2786
2787    import os
2788    from Scientific.IO.NetCDF import NetCDFFile
2789
2790    root = basename_in
2791
2792    # Read Meta data
2793    if verbose: print 'Reading METADATA from %s' % root + '.prj'
2794
2795    metadatafile = open(root + '.prj')
2796    metalines = metadatafile.readlines()
2797    metadatafile.close()
2798
2799    L = metalines[0].strip().split()
2800    assert L[0].strip().lower() == 'projection'
2801    projection = L[1].strip()                   #TEXT
2802
2803    L = metalines[1].strip().split()
2804    assert L[0].strip().lower() == 'zone'
2805    zone = int(L[1].strip())
2806
2807    L = metalines[2].strip().split()
2808    assert L[0].strip().lower() == 'datum'
2809    datum = L[1].strip()                        #TEXT
2810
2811    L = metalines[3].strip().split()
2812    assert L[0].strip().lower() == 'zunits'     #IGNORE
2813    zunits = L[1].strip()                       #TEXT
2814
2815    L = metalines[4].strip().split()
2816    assert L[0].strip().lower() == 'units'
2817    units = L[1].strip()                        #TEXT
2818
2819    L = metalines[5].strip().split()
2820    assert L[0].strip().lower() == 'spheroid'   #IGNORE
2821    spheroid = L[1].strip()                     #TEXT
2822
2823    L = metalines[6].strip().split()
2824    assert L[0].strip().lower() == 'xshift'
2825    false_easting = float(L[1].strip())
2826
2827    L = metalines[7].strip().split()
2828    assert L[0].strip().lower() == 'yshift'
2829    false_northing = float(L[1].strip())
2830
2831    #Read DEM data
2832    datafile = open(basename_in + '.asc')
2833
2834    if verbose: print 'Reading DEM from %s' % basename_in + '.asc'
2835
2836    lines = datafile.readlines()
2837    datafile.close()
2838
2839    if verbose: print 'Got', len(lines), ' lines'
2840
2841    ncols = int(lines[0].split()[1].strip())
2842    nrows = int(lines[1].split()[1].strip())
2843
2844    # Do cellsize (line 4) before line 2 and 3
2845    cellsize = float(lines[4].split()[1].strip())
2846
2847    # Checks suggested by Joaquim Luis
2848    # Our internal representation of xllcorner
2849    # and yllcorner is non-standard.
2850    xref = lines[2].split()
2851    if xref[0].strip() == 'xllcorner':
2852        xllcorner = float(xref[1].strip()) # + 0.5*cellsize # Correct offset
2853    elif xref[0].strip() == 'xllcenter':
2854        xllcorner = float(xref[1].strip())
2855    else:
2856        msg = 'Unknown keyword: %s' % xref[0].strip()
2857        raise Exception, msg
2858
2859    yref = lines[3].split()
2860    if yref[0].strip() == 'yllcorner':
2861        yllcorner = float(yref[1].strip()) # + 0.5*cellsize # Correct offset
2862    elif yref[0].strip() == 'yllcenter':
2863        yllcorner = float(yref[1].strip())
2864    else:
2865        msg = 'Unknown keyword: %s' % yref[0].strip()
2866        raise Exception, msg
2867
2868    NODATA_value = int(lines[5].split()[1].strip())
2869
2870    assert len(lines) == nrows + 6
2871
    if basename_out is None:
2873        netcdfname = root + '.dem'
2874    else:
2875        netcdfname = basename_out + '.dem'
2876
2877    if verbose: print 'Store to NetCDF file %s' % netcdfname
2878
2879    # NetCDF file definition
2880    fid = NetCDFFile(netcdfname, netcdf_mode_w)
2881
2882    #Create new file
2883    fid.institution = 'Geoscience Australia'
2884    fid.description = 'NetCDF DEM format for compact and portable storage ' \
2885                      'of spatial point data'
2886
2887    fid.ncols = ncols
2888    fid.nrows = nrows
2889    fid.xllcorner = xllcorner
2890    fid.yllcorner = yllcorner
2891    fid.cellsize = cellsize
2892    fid.NODATA_value = NODATA_value
2893
2894    fid.zone = zone
2895    fid.false_easting = false_easting
2896    fid.false_northing = false_northing
2897    fid.projection = projection
2898    fid.datum = datum
2899    fid.units = units
2900
2901    # dimension definitions
2902    fid.createDimension('number_of_rows', nrows)
2903    fid.createDimension('number_of_columns', ncols)
2904
2905    # variable definitions
2906    fid.createVariable('elevation', num.Float, ('number_of_rows',
2907                                                'number_of_columns'))
2908
2909    # Get handles to the variables
2910    elevation = fid.variables['elevation']
2911
2912    # Store data
2913    n = len(lines[6:])
2914    for i, line in enumerate(lines[6:]):
2915        fields = line.split()
2916        if verbose and i % ((n+10)/10) == 0:
2917            print 'Processing row %d of %d' % (i, nrows)
2918
2919           
2920        if len(fields) != ncols:
2921            msg = 'Wrong number of columns in file "%s" line %d\n' % (basename_in + '.asc', i)
2922            msg += 'I got %d elements, but there should have been %d\n' % (len(fields), ncols)
2923            raise Exception, msg
2924
2925        elevation[i, :] = num.array([float(x) for x in fields])
2926
2927    fid.close()
2928
2929
2930##
2931# @brief Convert 'ferret' file to SWW file.
2932# @param basename_in Stem of input filename.
2933# @param basename_out Stem of output filename.
2934# @param verbose True if this function is to be verbose.
2935# @param minlat
2936# @param maxlat
2937# @param minlon
2938# @param maxlon
2939# @param mint
2940# @param maxt
2941# @param mean_stage
2942# @param origin
2943# @param zscale
2944# @param fail_on_NaN
2945# @param NaN_filler
2946# @param elevation
2947# @param inverted_bathymetry
2948def ferret2sww(basename_in, basename_out=None,
2949               verbose=False,
2950               minlat=None, maxlat=None,
2951               minlon=None, maxlon=None,
2952               mint=None, maxt=None, mean_stage=0,
2953               origin=None, zscale=1,
2954               fail_on_NaN=True,
2955               NaN_filler=0,
2956               elevation=None,
2957               inverted_bathymetry=True
2958               ): #FIXME: Bathymetry should be obtained
2959                                  #from MOST somehow.
2960                                  #Alternatively from elsewhere
2961                                  #or, as a last resort,
2962                                  #specified here.
2963                                  #The value of -100 will work
2964                                  #for the Wollongong tsunami
2965                                  #scenario but is very hacky
2966    """Convert MOST and 'Ferret' NetCDF format for wave propagation to
2967    sww format native to abstract_2d_finite_volumes.
2968
2969    Specify only basename_in and read files of the form
2970    basefilename_ha.nc, basefilename_ua.nc, basefilename_va.nc containing
2971    relative height, x-velocity and y-velocity, respectively.
2972
2973    Also convert latitude and longitude to UTM. All coordinates are
2974    assumed to be given in the GDA94 datum.
2975
    min's and max's: If omitted, the full extent is used.
    To include a value, min may equal it, while max must exceed it.
    Lat and lon are assumed to be in decimal degrees.
2979
2980    origin is a 3-tuple with geo referenced
2981    UTM coordinates (zone, easting, northing)
2982
2983    nc format has values organised as HA[TIME, LATITUDE, LONGITUDE]
2984    which means that longitude is the fastest
2985    varying dimension (row major order, so to speak)
2986
2987    ferret2sww uses grid points as vertices in a triangular grid
2988    counting vertices from lower left corner upwards, then right
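
    Example use (names and extents are illustrative only):

        ferret2sww('most_output', basename_out='boundary',
                   minlat=-35.0, maxlat=-34.0,
                   minlon=150.0, maxlon=151.5,
                   origin=(56, 0.0, 0.0),
                   zscale=0.01,  # assuming amplitudes are given in cm
                   verbose=True)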
2989    """
2990
2991    import os
2992    from Scientific.IO.NetCDF import NetCDFFile
2993
2994    precision = num.Float
2995
2996    msg = 'Must use latitudes and longitudes for minlat, maxlon etc'
2997
    if minlat is not None:
        assert -90 < minlat < 90, msg
    if maxlat is not None:
        assert -90 < maxlat < 90, msg
        if minlat is not None:
            assert maxlat > minlat
    if minlon is not None:
        assert -180 < minlon < 180, msg
    if maxlon is not None:
        assert -180 < maxlon < 180, msg
        if minlon is not None:
            assert maxlon > minlon
3010
3011    # Get NetCDF data
3012    if verbose: print 'Reading files %s_*.nc' % basename_in
3013
3014    file_h = NetCDFFile(basename_in + '_ha.nc', netcdf_mode_r) # Wave amplitude (cm)
3015    file_u = NetCDFFile(basename_in + '_ua.nc', netcdf_mode_r) # Velocity (x) (cm/s)
3016    file_v = NetCDFFile(basename_in + '_va.nc', netcdf_mode_r) # Velocity (y) (cm/s)
3017    file_e = NetCDFFile(basename_in + '_e.nc', netcdf_mode_r)  # Elevation (z) (m)
3018
3019    if basename_out is None:
3020        swwname = basename_in + '.sww'
3021    else:
3022        swwname = basename_out + '.sww'
3023
3024    # Get dimensions of file_h
3025    for dimension in file_h.dimensions.keys():
3026        if dimension[:3] == 'LON':
3027            dim_h_longitude = dimension
3028        if dimension[:3] == 'LAT':
3029            dim_h_latitude = dimension
3030        if dimension[:4] == 'TIME':
3031            dim_h_time = dimension
3032
3033    times = file_h.variables[dim_h_time]
3034    latitudes = file_h.variables[dim_h_latitude]
3035    longitudes = file_h.variables[dim_h_longitude]
3036
3041    # get dimensions for file_e
3042    for dimension in file_e.dimensions.keys():
3043        if dimension[:3] == 'LON':
3044            dim_e_longitude = dimension
3045        if dimension[:3] == 'LAT':
3046            dim_e_latitude = dimension
3047
3048    # get dimensions for file_u
3049    for dimension in file_u.dimensions.keys():
3050        if dimension[:3] == 'LON':
3051            dim_u_longitude = dimension
3052        if dimension[:3] == 'LAT':
3053            dim_u_latitude = dimension
3054        if dimension[:4] == 'TIME':
3055            dim_u_time = dimension
3056
3057    # get dimensions for file_v
3058    for dimension in file_v.dimensions.keys():
3059        if dimension[:3] == 'LON':
3060            dim_v_longitude = dimension
3061        if dimension[:3] == 'LAT':
3062            dim_v_latitude = dimension
3063        if dimension[:4] == 'TIME':
3064            dim_v_time = dimension
3065
3066    # Precision used by most for lat/lon is 4 or 5 decimals
3067    e_lat = num.around(file_e.variables[dim_e_latitude][:], 5)
3068    e_lon = num.around(file_e.variables[dim_e_longitude][:], 5)
3069
3070    # Check that files are compatible
3071    assert num.allclose(latitudes, file_u.variables[dim_u_latitude])
3072    assert num.allclose(latitudes, file_v.variables[dim_v_latitude])
3073    assert num.allclose(latitudes, e_lat)
3074
3075    assert num.allclose(longitudes, file_u.variables[dim_u_longitude])
3076    assert num.allclose(longitudes, file_v.variables[dim_v_longitude])
3077    assert num.allclose(longitudes, e_lon)
3078
3079    if mint is None:
3080        jmin = 0
3081        mint = times[0]
3082    else:
3083        jmin = num.searchsorted(times, mint)
3084
3085    if maxt is None:
3086        jmax = len(times)
3087        maxt = times[-1]
3088    else:
3089        jmax = num.searchsorted(times, maxt)
3090
3091    kmin, kmax, lmin, lmax = _get_min_max_indexes(latitudes[:],
3092                                                  longitudes[:],
3093                                                  minlat, maxlat,
3094                                                  minlon, maxlon)
3095
3096
3097    times = times[jmin:jmax]
3098    latitudes = latitudes[kmin:kmax]
3099    longitudes = longitudes[lmin:lmax]
3100
3101    if verbose: print 'cropping'
3102
3103    zname = 'ELEVATION'
3104
3105    amplitudes = file_h.variables['HA'][jmin:jmax, kmin:kmax, lmin:lmax]
3106    uspeed = file_u.variables['UA'][jmin:jmax, kmin:kmax, lmin:lmax] #Lon
3107    vspeed = file_v.variables['VA'][jmin:jmax, kmin:kmax, lmin:lmax] #Lat
3108    elevations = file_e.variables[zname][kmin:kmax, lmin:lmax]
3109
3110    #    if latitudes2[0]==latitudes[0] and latitudes2[-1]==latitudes[-1]:
3111    #        elevations = file_e.variables['ELEVATION'][kmin:kmax, lmin:lmax]
3112    #    elif latitudes2[0]==latitudes[-1] and latitudes2[-1]==latitudes[0]:
3113    #        from Numeric import asarray
3114    #        elevations=elevations.tolist()
3115    #        elevations.reverse()
3116    #        elevations=asarray(elevations)
3117    #    else:
3118    #        from Numeric import asarray
3119    #        elevations=elevations.tolist()
3120    #        elevations.reverse()
3121    #        elevations=asarray(elevations)
3122    #        'print hmmm'
3123
3124    #Get missing values
3125    nan_ha = file_h.variables['HA'].missing_value[0]
3126    nan_ua = file_u.variables['UA'].missing_value[0]
3127    nan_va = file_v.variables['VA'].missing_value[0]
3128    if hasattr(file_e.variables[zname],'missing_value'):
3129        nan_e  = file_e.variables[zname].missing_value[0]
3130    else:
3131        nan_e = None
3132
3133    #Cleanup
3134    missing = (amplitudes == nan_ha)
3135    if num.sometrue (missing):
3136        if fail_on_NaN:
3137            msg = 'NetCDFFile %s contains missing values' \
                  % (basename_in + '_ha.nc')
3139            raise DataMissingValuesError, msg
3140        else:
3141            amplitudes = amplitudes*(missing==0) + missing*NaN_filler
3142
3143    missing = (uspeed == nan_ua)
3144    if num.sometrue (missing):
3145        if fail_on_NaN:
3146            msg = 'NetCDFFile %s contains missing values' \
                  % (basename_in + '_ua.nc')
3148            raise DataMissingValuesError, msg
3149        else:
3150            uspeed = uspeed*(missing==0) + missing*NaN_filler
3151
3152    missing = (vspeed == nan_va)
3153    if num.sometrue (missing):
3154        if fail_on_NaN:
3155            msg = 'NetCDFFile %s contains missing values' \
                  % (basename_in + '_va.nc')
3157            raise DataMissingValuesError, msg
3158        else:
3159            vspeed = vspeed*(missing==0) + missing*NaN_filler
3160
3161    missing = (elevations == nan_e)
3162    if num.sometrue (missing):
3163        if fail_on_NaN:
3164            msg = 'NetCDFFile %s contains missing values' \
                  % (basename_in + '_e.nc')
3166            raise DataMissingValuesError, msg
3167        else:
3168            elevations = elevations*(missing==0) + missing*NaN_filler
3169
3170    number_of_times = times.shape[0]
3171    number_of_latitudes = latitudes.shape[0]
3172    number_of_longitudes = longitudes.shape[0]
3173
3174    assert amplitudes.shape[0] == number_of_times
3175    assert amplitudes.shape[1] == number_of_latitudes
3176    assert amplitudes.shape[2] == number_of_longitudes
3177
3178    if verbose:
3179        print '------------------------------------------------'
3180        print 'Statistics:'
3181        print '  Extent (lat/lon):'
3182        print '    lat in [%f, %f], len(lat) == %d' \
3183              % (min(latitudes.flat), max(latitudes.flat), len(latitudes.flat))
3184        print '    lon in [%f, %f], len(lon) == %d' \
3185              % (min(longitudes.flat), max(longitudes.flat),
3186                 len(longitudes.flat))
3187        print '    t in [%f, %f], len(t) == %d' \
3188              % (min(times.flat), max(times.flat), len(times.flat))
3189
3190        q = amplitudes.flat
3191        name = 'Amplitudes (ha) [cm]'
        print '    %s in [%f, %f]' % (name, min(q), max(q))
3193
3194        q = uspeed.flat
3195        name = 'Speeds (ua) [cm/s]'
        print '    %s in [%f, %f]' % (name, min(q), max(q))
3197
3198        q = vspeed.flat
3199        name = 'Speeds (va) [cm/s]'
        print '    %s in [%f, %f]' % (name, min(q), max(q))
3201
3202        q = elevations.flat
3203        name = 'Elevations (e) [m]'
        print '    %s in [%f, %f]' % (name, min(q), max(q))
3205
3206    # print number_of_latitudes, number_of_longitudes
3207    number_of_points = number_of_latitudes * number_of_longitudes
3208    number_of_volumes = (number_of_latitudes-1) * (number_of_longitudes-1) * 2
3209
3210    file_h.close()
3211    file_u.close()
3212    file_v.close()
3213    file_e.close()
3214
3215    # NetCDF file definition
3216    outfile = NetCDFFile(swwname, netcdf_mode_w)
3217
3218    description = 'Converted from Ferret files: %s, %s, %s, %s' \
3219                  % (basename_in + '_ha.nc',
3220                     basename_in + '_ua.nc',
3221                     basename_in + '_va.nc',
3222                     basename_in + '_e.nc')
3223
3224    # Create new file
3225    starttime = times[0]
3226
3227    sww = Write_sww()
3228    sww.store_header(outfile, times, number_of_volumes,
3229                     number_of_points, description=description,
3230                     verbose=verbose, sww_precision=num.Float)
3231
3232    # Store
3233    from anuga.coordinate_transforms.redfearn import redfearn
3234    x = num.zeros(number_of_points, num.Float)  #Easting
3235    y = num.zeros(number_of_points, num.Float)  #Northing
3236
3237    if verbose: print 'Making triangular grid'
3238
3239    # Check zone boundaries
3240    refzone, _, _ = redfearn(latitudes[0], longitudes[0])
3241
3242    vertices = {}
3243    i = 0
3244    for k, lat in enumerate(latitudes):       # Y direction
3245        for l, lon in enumerate(longitudes):  # X direction
3246            vertices[l,k] = i
3247
3248            zone, easting, northing = redfearn(lat,lon)
3249
3250            #msg = 'Zone boundary crossed at longitude =', lon
3251            #assert zone == refzone, msg
3252            #print '%7.2f %7.2f %8.2f %8.2f' %(lon, lat, easting, northing)
3253            x[i] = easting
3254            y[i] = northing
3255            i += 1
3256
3257    #Construct 2 triangles per 'rectangular' element
3258    volumes = []
3259    for l in range(number_of_longitudes-1):    # X direction
3260        for k in range(number_of_latitudes-1): # Y direction
3261            v1 = vertices[l,k+1]
3262            v2 = vertices[l,k]
3263            v3 = vertices[l+1,k+1]
3264            v4 = vertices[l+1,k]
3265
3266            volumes.append([v1,v2,v3]) #Upper element
3267            volumes.append([v4,v3,v2]) #Lower element
3268
3269    volumes = num.array(volumes, num.Int)      #array default#
3270
3271    if origin is None:
3272        origin = Geo_reference(refzone, min(x), min(y))
3273    geo_ref = write_NetCDF_georeference(origin, outfile)
3274
3275    if elevation is not None:
3276        z = elevation
3277    else:
3278        if inverted_bathymetry:
3279            z = -1 * elevations
3280        else:
3281            z = elevations
3282    #FIXME: z should be obtained from MOST and passed in here
3283
3284    #FIXME use the Write_sww instance(sww) to write this info
3285    z = num.resize(z, outfile.variables['z'][:].shape)
3286    outfile.variables['x'][:] = x - geo_ref.get_xllcorner()
3287    outfile.variables['y'][:] = y - geo_ref.get_yllcorner()
    outfile.variables['z'][:] = z             #FIXME HACK for backwards compat.
3289    outfile.variables['elevation'][:] = z
3290    outfile.variables['volumes'][:] = volumes.astype(num.Int32) #For Opteron 64
3291
3292    #Time stepping
3293    stage = outfile.variables['stage']
3294    xmomentum = outfile.variables['xmomentum']
3295    ymomentum = outfile.variables['ymomentum']
3296
3297    if verbose: print 'Converting quantities'
3298
3299    n = len(times)
3300    for j in range(n):
3301        if verbose and j % ((n+10)/10) == 0: print '  Doing %d of %d' %(j, n)
3302
3303        i = 0
3304        for k in range(number_of_latitudes):      # Y direction
3305            for l in range(number_of_longitudes): # X direction
3306                w = zscale * amplitudes[j,k,l] / 100 + mean_stage
3307                stage[j,i] = w
3308                h = w - z[i]
3309                xmomentum[j,i] = uspeed[j,k,l]/100*h
3310                ymomentum[j,i] = vspeed[j,k,l]/100*h
3311                i += 1
3312
3313    #outfile.close()
3314
3315    #FIXME: Refactor using code from file_function.statistics
3316    #Something like print swwstats(swwname)
3317    if verbose:
3318        x = outfile.variables['x'][:]
3319        y = outfile.variables['y'][:]
3320        print '------------------------------------------------'
3321        print 'Statistics of output file:'
3322        print '  Name: %s' %swwname
3323        print '  Reference:'
3324        print '    Lower left corner: [%f, %f]' \
3325              % (geo_ref.get_xllcorner(), geo_ref.get_yllcorner())
        print '    Start time: %f' % starttime
3327        print '    Min time: %f' %mint
3328        print '    Max time: %f' %maxt
3329        print '  Extent:'
3330        print '    x [m] in [%f, %f], len(x) == %d' \
3331              % (min(x.flat), max(x.flat), len(x.flat))
3332        print '    y [m] in [%f, %f], len(y) == %d' \
3333              % (min(y.flat), max(y.flat), len(y.flat))
3334        print '    t [s] in [%f, %f], len(t) == %d' \
3335              % (min(times), max(times), len(times))
3336        print '  Quantities [SI units]:'
3337        for name in ['stage', 'xmomentum', 'ymomentum', 'elevation']:
3338            q = outfile.variables[name][:].flat
3339            print '    %s in [%f, %f]' % (name, min(q), max(q))
3340
3341    outfile.close()
3342
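# A minimal usage sketch of the conversion above (illustrative only; the
# file names are hypothetical). Given MOST/Ferret output 'mostfile_ha.nc',
# 'mostfile_ua.nc', 'mostfile_va.nc' and 'mostfile_e.nc':
#
#     ferret2sww('mostfile',
#                minlat=-35.0, maxlat=-34.0,   # crop to area of interest
#                minlon=150.0, maxlon=151.5,
#                mean_stage=0.0, zscale=1,
#                verbose=True)
#
# yields 'mostfile.sww' with stage, xmomentum and ymomentum time slices.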
3343
3344##
3345# @brief Convert time-series text file to TMS file.
3346# @param filename
3347# @param quantity_names
3348# @param time_as_seconds
3349def timefile2netcdf(filename, quantity_names=None, time_as_seconds=False):
3350    """Template for converting typical text files with time series to
3351    NetCDF tms file.
3352
3353    The file format is assumed to be either two fields separated by a comma:
3354
3355        time [DD/MM/YY hh:mm:ss], value0 value1 value2 ...
3356
    E.g.
3358
3359      31/08/04 00:00:00, 1.328223 0 0
3360      31/08/04 00:15:00, 1.292912 0 0
3361
3362    or time (seconds), value0 value1 value2 ...
3363
3364      0.0, 1.328223 0 0
3365      0.1, 1.292912 0 0
3366
3367    will provide a time dependent function f(t) with three attributes
3368
    filename is assumed to be the rootname; the input file is <filename>.txt
    and the output is written to <filename>.tms
3370    """
3371
3372    import time, calendar
3373    from anuga.config import time_format
3374    from anuga.utilities.numerical_tools import ensure_numeric
3375
3376    file_text = filename + '.txt'
3377    fid = open(file_text)
3378    line = fid.readline()
3379    fid.close()
3380
3381    fields = line.split(',')
3382    msg = "File %s must have the format 'datetime, value0 value1 value2 ...'" \
3383          % file_text
3384    assert len(fields) == 2, msg
3385
3386    if not time_as_seconds:
3387        try:
3388            starttime = calendar.timegm(time.strptime(fields[0], time_format))
3389        except ValueError:
3390            msg = 'First field in file %s must be' % file_text
3391            msg += ' date-time with format %s.\n' % time_format
3392            msg += 'I got %s instead.' % fields[0]
3393            raise DataTimeError, msg
3394    else:
3395        try:
3396            starttime = float(fields[0])
        except ValueError:
3398            msg = "Bad time format"
3399            raise DataTimeError, msg
3400
3401    # Split values
3402    values = []
3403    for value in fields[1].split():
3404        values.append(float(value))
3405
3406    q = ensure_numeric(values)
3407
3408    msg = 'ERROR: File must contain at least one independent value'
3409    assert len(q.shape) == 1, msg
3410
3411    # Read times proper
3412    from anuga.config import time_format
3413    import time, calendar
3414
3415    fid = open(file_text)
3416    lines = fid.readlines()
3417    fid.close()
3418
3419    N = len(lines)
3420    d = len(q)
3421
3422    T = num.zeros(N, num.Float)       # Time
3423    Q = num.zeros((N, d), num.Float)  # Values
3424
3425    for i, line in enumerate(lines):
3426        fields = line.split(',')
3427        if not time_as_seconds:
3428            realtime = calendar.timegm(time.strptime(fields[0], time_format))
3429        else:
            realtime = float(fields[0])
3431        T[i] = realtime - starttime
3432
3433        for j, value in enumerate(fields[1].split()):
3434            Q[i, j] = float(value)
3435
    msg = 'File %s must list time as a monotonically ' % filename
3437    msg += 'increasing sequence'
3438    assert num.alltrue(T[1:] - T[:-1] > 0), msg
3439
3440    #Create NetCDF file
3441    from Scientific.IO.NetCDF import NetCDFFile
3442
3443    fid = NetCDFFile(filename + '.tms', netcdf_mode_w)
3444
3445    fid.institution = 'Geoscience Australia'
3446    fid.description = 'Time series'
3447
3448    #Reference point
3449    #Start time in seconds since the epoch (midnight 1/1/1970)
3450    #FIXME: Use Georef
3451    fid.starttime = starttime
3452
3453    # dimension definitions
3454    #fid.createDimension('number_of_volumes', self.number_of_volumes)
3455    #fid.createDimension('number_of_vertices', 3)
3456
3457    fid.createDimension('number_of_timesteps', len(T))
3458
3459    fid.createVariable('time', num.Float, ('number_of_timesteps',))
3460
3461    fid.variables['time'][:] = T
3462
3463    for i in range(Q.shape[1]):
3464        try:
3465            name = quantity_names[i]
        except (TypeError, IndexError):
3467            name = 'Attribute%d' % i
3468
3469        fid.createVariable(name, num.Float, ('number_of_timesteps',))
3470        fid.variables[name][:] = Q[:,i]
3471
3472    fid.close()
3473
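# Usage sketch (illustrative; 'gauge.txt' is a hypothetical input). A file
# containing lines such as
#
#     31/08/04 00:00:00, 1.328223 0 0
#     31/08/04 00:15:00, 1.292912 0 0
#
# would be converted to 'gauge.tms' with named attributes via:
#
#     timefile2netcdf('gauge',
#                     quantity_names=['stage', 'xmomentum', 'ymomentum'])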
3474
3475##
3476# @brief Get the extents of a NetCDF data file.
3477# @param file_name The path to the SWW file.
3478# @return A list of x, y, z and stage limits (min, max).
3479def extent_sww(file_name):
3480    """Read in an sww file.
3481
3482    Input:
3483    file_name - the sww file
3484
3485    Output:
3486    A list: [min(x),max(x),min(y),max(y),min(stage.flat),max(stage.flat)]
3487    """
3488
3489    from Scientific.IO.NetCDF import NetCDFFile
3490
3491    #Get NetCDF
3492    fid = NetCDFFile(file_name, netcdf_mode_r)
3493
3494    # Get the variables
3495    x = fid.variables['x'][:]
3496    y = fid.variables['y'][:]
3497    stage = fid.variables['stage'][:]
3498
3499    fid.close()
3500
3501    return [min(x), max(x), min(y), max(y), min(stage.flat), max(stage.flat)]
3502
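# Example (illustrative; 'model.sww' is hypothetical):
#
#     xmin, xmax, ymin, ymax, stage_min, stage_max = extent_sww('model.sww')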
3503
3504##
# @brief Create a Domain instance from an SWW file.
3506# @param filename
3507# @param boundary
3508# @param t
3509# @param fail_if_NaN
3510# @param NaN_filler
3511# @param verbose
3512# @param very_verbose
# @return A Domain instance.
3514def sww2domain(filename, boundary=None, t=None,
3515               fail_if_NaN=True, NaN_filler=0,
3516               verbose=False, very_verbose=False):
3517    """
3518    Usage: domain = sww2domain('file.sww',t=time (default = last time in file))
3519
3520    Boundary is not recommended if domain.smooth is not selected, as it
3521    uses unique coordinates, but not unique boundaries. This means that
    the boundary file will not be compatible with the coordinates, and will
3523    give a different final boundary, or crash.
3524    """
3525
3526    from Scientific.IO.NetCDF import NetCDFFile
3527    from shallow_water import Domain
3528
3529    # initialise NaN.
3530    NaN = 9.969209968386869e+036
3531
3532    if verbose: print 'Reading from ', filename
3533
3534    fid = NetCDFFile(filename, netcdf_mode_r)    # Open existing file for read
3535    time = fid.variables['time']       # Timesteps
3536    if t is None:
3537        t = time[-1]
3538    time_interp = get_time_interp(time,t)
3539
3540    # Get the variables as Numeric arrays
3541    x = fid.variables['x'][:]                   # x-coordinates of vertices
3542    y = fid.variables['y'][:]                   # y-coordinates of vertices
3543    elevation = fid.variables['elevation']      # Elevation
3544    stage = fid.variables['stage']              # Water level
3545    xmomentum = fid.variables['xmomentum']      # Momentum in the x-direction
3546    ymomentum = fid.variables['ymomentum']      # Momentum in the y-direction
3547
3548    starttime = fid.starttime[0]
3549    volumes = fid.variables['volumes'][:]       # Connectivity
3550    coordinates = num.transpose(num.asarray([x.tolist(), y.tolist()]))
3551    # FIXME (Ole): Something like this might be better:
3552    #                 concatenate((x, y), axis=1)
    # or              concatenate((x[:,num.NewAxis], y[:,num.NewAxis]), axis=1)
3554
3555    conserved_quantities = []
3556    interpolated_quantities = {}
3557    other_quantities = []
3558
3559    # get geo_reference
3560    try:                             # sww files don't have to have a geo_ref
3561        geo_reference = Geo_reference(NetCDFObject=fid)
3562    except: # AttributeError, e:
3563        geo_reference = None
3564
3565    if verbose: print '    getting quantities'
3566
3567    for quantity in fid.variables.keys():
3568        dimensions = fid.variables[quantity].dimensions
3569        if 'number_of_timesteps' in dimensions:
3570            conserved_quantities.append(quantity)
3571            interpolated_quantities[quantity] = \
3572                  interpolated_quantity(fid.variables[quantity][:], time_interp)
3573        else:
3574            other_quantities.append(quantity)
3575
3576    other_quantities.remove('x')
3577    other_quantities.remove('y')
3578    other_quantities.remove('z')
3579    other_quantities.remove('volumes')
3580    try:
3581        other_quantities.remove('stage_range')
3582        other_quantities.remove('xmomentum_range')
3583        other_quantities.remove('ymomentum_range')
3584        other_quantities.remove('elevation_range')
3585    except:
3586        pass
3587
3588    conserved_quantities.remove('time')
3589
3590    if verbose: print '    building domain'
3591
3592    #    From domain.Domain:
3593    #    domain = Domain(coordinates, volumes,\
3594    #                    conserved_quantities = conserved_quantities,\
3595    #                    other_quantities = other_quantities,zone=zone,\
3596    #                    xllcorner=xllcorner, yllcorner=yllcorner)
3597
3598    # From shallow_water.Domain:
3599    coordinates = coordinates.tolist()
3600    volumes = volumes.tolist()
3601    # FIXME:should this be in mesh? (peter row)
3602    if fid.smoothing == 'Yes':
3603        unique = False
3604    else:
3605        unique = True
3606    if unique:
3607        coordinates, volumes, boundary = weed(coordinates, volumes,boundary)
3608
3609    try:
3610        domain = Domain(coordinates, volumes, boundary)
3611    except AssertionError, e:
3612        fid.close()
3613        msg = 'Domain could not be created: %s. ' \
3614              'Perhaps use "fail_if_NaN=False and NaN_filler = ..."' % e
3615        raise DataDomainError, msg
3616
3617    if not boundary is None:
3618        domain.boundary = boundary
3619
3620    domain.geo_reference = geo_reference
3621
3622    domain.starttime = float(starttime) + float(t)
3623    domain.time = 0.0
3624
3625    for quantity in other_quantities:
3626        try:
3627            NaN = fid.variables[quantity].missing_value
3628        except:
3629            pass                       # quantity has no missing_value number
3630        X = fid.variables[quantity][:]
3631        if very_verbose:
3632            print '       ', quantity
3633            print '        NaN =', NaN
3634            print '        max(X)'
3635            print '       ', max(X)
3636            print '        max(X)==NaN'
3637            print '       ', max(X)==NaN
3638            print ''
3639        if max(X) == NaN or min(X) == NaN:
3640            if fail_if_NaN:
3641                msg = 'quantity "%s" contains no_data entry' % quantity
3642                raise DataMissingValuesError, msg
3643            else:
3644                data = (X != NaN)
3645                X = (X*data) + (data==0)*NaN_filler
3646        if unique:
3647            X = num.resize(X, (len(X)/3, 3))
3648        domain.set_quantity(quantity, X)
3649    #
3650    for quantity in conserved_quantities:
3651        try:
3652            NaN = fid.variables[quantity].missing_value
3653        except:
3654            pass                       # quantity has no missing_value number
3655        X = interpolated_quantities[quantity]
3656        if very_verbose:
3657            print '       ',quantity
3658            print '        NaN =', NaN
3659            print '        max(X)'
3660            print '       ', max(X)
3661            print '        max(X)==NaN'
3662            print '       ', max(X)==NaN
3663            print ''
3664        if max(X) == NaN or min(X) == NaN:
3665            if fail_if_NaN:
3666                msg = 'quantity "%s" contains no_data entry' % quantity
3667                raise DataMissingValuesError, msg
3668            else:
3669                data = (X != NaN)
3670                X = (X*data) + (data==0)*NaN_filler
3671        if unique:
3672            X = num.resize(X, (X.shape[0]/3, 3))
3673        domain.set_quantity(quantity, X)
3674
3675    fid.close()
3676
3677    return domain
3678
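# Usage sketch (illustrative; 'restart.sww' is hypothetical): rebuild a
# domain from the last stored timestep and continue from there:
#
#     domain = sww2domain('restart.sww', verbose=True)  # t defaults to last
#     domain.set_name('restarted_run')
#
# Note that domain.starttime is offset by t, so subsequent output follows
# on from the interpolated state.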
3679
3680##
3681# @brief Interpolate a quantity wrt time.
3682# @param saved_quantity The quantity to interpolate.
3683# @param time_interp (index, ratio)
3684# @return A vector of interpolated values.
3685def interpolated_quantity(saved_quantity, time_interp):
3686    '''Given an index and ratio, interpolate quantity with respect to time.'''
3687
3688    index, ratio = time_interp
3689
3690    Q = saved_quantity
3691
3692    if ratio > 0:
3693        q = (1-ratio)*Q[index] + ratio*Q[index+1]
3694    else:
3695        q = Q[index]
3696
3697    #Return vector of interpolated values
3698    return q
3699
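# Worked example: with Q[2] == 1.0, Q[3] == 3.0 and time_interp == (2, 0.25),
# interpolated_quantity returns (1 - 0.25)*1.0 + 0.25*3.0 == 1.5.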
3700
3701##
3702# @brief
# @param time Array of time values.
# @param t Target time; defaults to the last time in 'time'.
# @return An (index, ratio) tuple.
3706def get_time_interp(time, t=None):
3707    #Finds the ratio and index for time interpolation.
3708    #It is borrowed from previous abstract_2d_finite_volumes code.
3709    if t is None:
3710        t=time[-1]
3711        index = -1
3712        ratio = 0.
3713    else:
3714        T = time
3715        tau = t
3716        index=0
3717        msg = 'Time interval derived from file %s [%s:%s]' \
3718              % ('FIXMEfilename', T[0], T[-1])
3719        msg += ' does not match model time: %s' % tau
3720        if tau < time[0]: raise DataTimeError, msg
3721        if tau > time[-1]: raise DataTimeError, msg
3722        while tau > time[index]: index += 1
3723        while tau < time[index]: index -= 1
3724        if tau == time[index]:
3725            #Protect against case where tau == time[-1] (last time)
3726            # - also works in general when tau == time[i]
3727            ratio = 0
3728        else:
3729            #t is now between index and index+1
3730            ratio = (tau - time[index])/(time[index+1] - time[index])
3731
3732    return (index, ratio)
3733
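# Worked examples: for time == [0.0, 10.0, 20.0]:
#
#     get_time_interp(time)        => (-1, 0.0)  # default: last timestep
#     get_time_interp(time, 15.0)  => (1, 0.5)   # halfway between 10 and 20
#     get_time_interp(time, 10.0)  => (1, 0)     # exact match, no blending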
3734
3735##
3736# @brief
3737# @param coordinates
3738# @param volumes
3739# @param boundary
3740def weed(coordinates, volumes, boundary=None):
3741    if type(coordinates) == num.ArrayType:
3742        coordinates = coordinates.tolist()
3743    if type(volumes) == num.ArrayType:
3744        volumes = volumes.tolist()
3745
3746    unique = False
3747    point_dict = {}
3748    same_point = {}
3749    for i in range(len(coordinates)):
3750        point = tuple(coordinates[i])
3751        if point_dict.has_key(point):
3752            unique = True
3753            same_point[i] = point
3754            #to change all point i references to point j
3755        else:
3756            point_dict[point] = i
3757            same_point[i] = point
3758
3759    coordinates = []
3760    i = 0
3761    for point in point_dict.keys():
3762        point = tuple(point)
3763        coordinates.append(list(point))
3764        point_dict[point] = i
3765        i += 1
3766
3767    for volume in volumes:
3768        for i in range(len(volume)):
3769            index = volume[i]
3770            if index > -1:
3771                volume[i] = point_dict[same_point[index]]
3772
3773    new_boundary = {}
3774    if not boundary is None:
3775        for segment in boundary.keys():
3776            point0 = point_dict[same_point[segment[0]]]
3777            point1 = point_dict[same_point[segment[1]]]
3778            label = boundary[segment]
            #FIXME should the boundary attributes be concatenated
            #('exterior, pond') or replaced ('pond')? (peter row)
3781
            if new_boundary.has_key((point0, point1)):
                pass                            # keep the existing label
            elif new_boundary.has_key((point1, point0)):
                pass                            # keep the existing label
            else:
                new_boundary[(point0, point1)] = label
3788
3789        boundary = new_boundary
3790
3791    return coordinates, volumes, boundary
3792
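# Example (illustrative): weed() merges duplicated vertices, e.g.
#
#     coords = [[0,0], [1,0], [0,0], [1,1]]    # [0,0] appears twice
#     tris = [[0,1,3], [2,3,1]]
#     coords, tris, _ = weed(coords, tris)
#
# leaves three unique coordinates, with both triangles re-indexed to them.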
3793
3794##
3795# @brief Read DEM file, decimate data, write new DEM file.
3796# @param basename_in Stem of the input filename.
3797# @param stencil
3798# @param cellsize_new New cell size to resample on.
3799# @param basename_out Stem of the output filename.
3800# @param verbose True if this function is to be verbose.
3801def decimate_dem(basename_in, stencil, cellsize_new, basename_out=None,
3802                 verbose=False):
    """Read a Digital Elevation Model from the following NetCDF format (.dem)
3804
3805    Example:
3806
3807    ncols         3121
3808    nrows         1800
3809    xllcorner     722000
3810    yllcorner     5893000
3811    cellsize      25
3812    NODATA_value  -9999
3813    138.3698 137.4194 136.5062 135.5558 ..........
3814
3815    Decimate data to cellsize_new using stencil and write to NetCDF dem format.
3816    """
3817
3818    import os
3819    from Scientific.IO.NetCDF import NetCDFFile
3820
3821    root = basename_in
3822    inname = root + '.dem'
3823
3824    #Open existing netcdf file to read
3825    infile = NetCDFFile(inname, netcdf_mode_r)
3826
3827    if verbose: print 'Reading DEM from %s' % inname
3828
3829    #Read metadata
3830    ncols = infile.ncols[0]
3831    nrows = infile.nrows[0]
3832    xllcorner = infile.xllcorner[0]
3833    yllcorner = infile.yllcorner[0]
3834    cellsize = infile.cellsize[0]
3835    NODATA_value = infile.NODATA_value[0]
3836    zone = infile.zone[0]
3837    false_easting = infile.false_easting[0]
3838    false_northing = infile.false_northing[0]
3839    projection = infile.projection
3840    datum = infile.datum
3841    units = infile.units
3842
3843    dem_elevation = infile.variables['elevation']
3844
3845    #Get output file name
3846    if basename_out == None:
3847        outname = root + '_' + repr(cellsize_new) + '.dem'
3848    else:
3849        outname = basename_out + '.dem'
3850
3851    if verbose: print 'Write decimated NetCDF file to %s' % outname
3852
3853    #Determine some dimensions for decimated grid
3854    (nrows_stencil, ncols_stencil) = stencil.shape
3855    x_offset = ncols_stencil / 2
3856    y_offset = nrows_stencil / 2
3857    cellsize_ratio = int(cellsize_new / cellsize)
3858    ncols_new = 1 + (ncols - ncols_stencil) / cellsize_ratio
3859    nrows_new = 1 + (nrows - nrows_stencil) / cellsize_ratio
3860
3861    #Open netcdf file for output
3862    outfile = NetCDFFile(outname, netcdf_mode_w)
3863
3864    #Create new file
3865    outfile.institution = 'Geoscience Australia'
3866    outfile.description = 'NetCDF DEM format for compact and portable ' \
3867                          'storage of spatial point data'
3868
3869    #Georeferencing
3870    outfile.zone = zone
3871    outfile.projection = projection
3872    outfile.datum = datum
3873    outfile.units = units
3874
3875    outfile.cellsize = cellsize_new
3876    outfile.NODATA_value = NODATA_value
3877    outfile.false_easting = false_easting
3878    outfile.false_northing = false_northing
3879
3880    outfile.xllcorner = xllcorner + (x_offset * cellsize)
3881    outfile.yllcorner = yllcorner + (y_offset * cellsize)
3882    outfile.ncols = ncols_new
3883    outfile.nrows = nrows_new
3884
3885    # dimension definition
3886    outfile.createDimension('number_of_points', nrows_new*ncols_new)
3887
3888    # variable definition
3889    outfile.createVariable('elevation', num.Float, ('number_of_points',))
3890
3891    # Get handle to the variable
3892    elevation = outfile.variables['elevation']
3893
3894    dem_elevation_r = num.reshape(dem_elevation, (nrows, ncols))
3895
3896    #Store data
3897    global_index = 0
3898    for i in range(nrows_new):
3899        if verbose: print 'Processing row %d of %d' %(i, nrows_new)
3900
3901        lower_index = global_index
3902        telev = num.zeros(ncols_new, num.Float)
3903        local_index = 0
3904        trow = i * cellsize_ratio
3905
3906        for j in range(ncols_new):
3907            tcol = j * cellsize_ratio
3908            tmp = dem_elevation_r[trow:trow+nrows_stencil,
3909                                  tcol:tcol+ncols_stencil]
3910
3911            #if dem contains 1 or more NODATA_values set value in
3912            #decimated dem to NODATA_value, else compute decimated
3913            #value using stencil
3914            if num.sum(num.sum(num.equal(tmp, NODATA_value))) > 0:
3915                telev[local_index] = NODATA_value
3916            else:
3917                telev[local_index] = num.sum(num.sum(tmp * stencil))
3918
3919            global_index += 1
3920            local_index += 1
3921
3922        upper_index = global_index
3923
3924        elevation[lower_index:upper_index] = telev
3925
3926    assert global_index == nrows_new*ncols_new, \
3927           'index not equal to number of points'
3928
3929    infile.close()
3930    outfile.close()
3931
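# Usage sketch (illustrative; 'topo' is a hypothetical basename): resample
# a 25m DEM to 100m with a 4x4 averaging stencil (all cells weighted
# equally, so the stencil sums to 1):
#
#     stencil = num.ones((4, 4), num.Float) / 16.0
#     decimate_dem('topo', stencil, cellsize_new=100, verbose=True)
#
# Windows containing any NODATA_value are written out as NODATA_value
# rather than as a contaminated average.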
3932
3933##
# @brief Convert a TSH/MSH file to an SWW file.
3935# @param filename
3936# @param verbose
3937def tsh2sww(filename, verbose=False):
3938    """
    Convert a tsh/msh file to sww format, as a quick check that the mesh
    'looks' good.
3940    """
3941
    if verbose: print 'Creating domain from', filename
3943
3944    domain = pmesh_to_domain_instance(filename, Domain)
3945
    if verbose: print "Number of triangles = ", len(domain)
3947
3948    domain.smooth = True
3949    domain.format = 'sww'   #Native netcdf visualisation format
3950    file_path, filename = path.split(filename)
3951    filename, ext = path.splitext(filename)
3952    domain.set_name(filename)
3953    domain.reduction = mean
3954
    if verbose: print "file_path", file_path
3956
3957    if file_path == "":
3958        file_path = "."
3959    domain.set_datadir(file_path)
3960
    if verbose:
3962        print "Output written to " + domain.get_datadir() + sep + \
3963              domain.get_name() + "." + domain.format
3964
3965    sww = get_dataobject(domain)
3966    sww.store_connectivity()
3967    sww.store_timestep()
3968
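# Example (illustrative; 'mesh.tsh' is hypothetical):
#
#     tsh2sww('mesh.tsh', verbose=True)
#
# writes 'mesh.sww' next to the input file for a quick visual check.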
3969
3970##
3971# @brief Convert CSIRO ESRI file to an SWW boundary file.
3972# @param bath_dir
3973# @param elevation_dir
3974# @param ucur_dir
3975# @param vcur_dir
3976# @param sww_file
3977# @param minlat
3978# @param maxlat
3979# @param minlon
3980# @param maxlon
3981# @param zscale
3982# @param mean_stage
3983# @param fail_on_NaN
3984# @param elevation_NaN_filler
3985# @param bath_prefix
3986# @param elevation_prefix
3987# @param verbose
3988# @note Also convert latitude and longitude to UTM. All coordinates are
3989#       assumed to be given in the GDA94 datum.
3990def asc_csiro2sww(bath_dir,
3991                  elevation_dir,
3992                  ucur_dir,
3993                  vcur_dir,
3994                  sww_file,
3995                  minlat=None, maxlat=None,
3996                  minlon=None, maxlon=None,
3997                  zscale=1,
3998                  mean_stage=0,
3999                  fail_on_NaN=True,
4000                  elevation_NaN_filler=0,
4001                  bath_prefix='ba',
4002                  elevation_prefix='el',
4003                  verbose=False):
4004    """
4005    Produce an sww boundary file, from esri ascii data from CSIRO.
4006
4007    Also convert latitude and longitude to UTM. All coordinates are
4008    assumed to be given in the GDA94 datum.
4009
4010    assume:
4011    All files are in esri ascii format
4012
4013    4 types of information
4014    bathymetry
4015    elevation
4016    u velocity
4017    v velocity
4018
4019    Assumptions
4020    The metadata of all the files is the same
    Each type is in a separate directory
    One bath file with extension .000
4023    The time period is less than 24hrs and uniform.
4024    """
4025
4026    from Scientific.IO.NetCDF import NetCDFFile
4027
4028    from anuga.coordinate_transforms.redfearn import redfearn
4029
4030    precision = num.Float # So if we want to change the precision its done here
4031
4032    # go in to the bath dir and load the only file,
4033    bath_files = os.listdir(bath_dir)
4034    bath_file = bath_files[0]
4035    bath_dir_file =  bath_dir + os.sep + bath_file
4036    bath_metadata, bath_grid =  _read_asc(bath_dir_file)
4037
4038    #Use the date.time of the bath file as a basis for
4039    #the start time for other files
4040    base_start = bath_file[-12:]
4041
4042    #go into the elevation dir and load the 000 file
4043    elevation_dir_file = elevation_dir  + os.sep + elevation_prefix \
4044                         + base_start
4045
4046    elevation_files = os.listdir(elevation_dir)
4047    ucur_files = os.listdir(ucur_dir)
4048    vcur_files = os.listdir(vcur_dir)
4049    elevation_files.sort()
4050
4051    # the first elevation file should be the
4052    # file with the same base name as the bath data
4053    assert elevation_files[0] == 'el' + base_start
4054
4055    number_of_latitudes = bath_grid.shape[0]
4056    number_of_longitudes = bath_grid.shape[1]
4057    number_of_volumes = (number_of_latitudes-1) * (number_of_longitudes-1) * 2
4058
4059    longitudes = [bath_metadata['xllcorner'] + x*bath_metadata['cellsize'] \
4060                  for x in range(number_of_longitudes)]
4061    latitudes = [bath_metadata['yllcorner'] + y*bath_metadata['cellsize'] \
4062                 for y in range(number_of_latitudes)]
4063
    # reverse order of lat, so the first lat represents the first grid row
4065    latitudes.reverse()
4066
4067    kmin, kmax, lmin, lmax = _get_min_max_indexes(latitudes[:],longitudes[:],
4068                                                  minlat=minlat, maxlat=maxlat,
4069                                                  minlon=minlon, maxlon=maxlon)
4070
4071    bath_grid = bath_grid[kmin:kmax,lmin:lmax]
4072    latitudes = latitudes[kmin:kmax]
4073    longitudes = longitudes[lmin:lmax]
4074    number_of_latitudes = len(latitudes)
4075    number_of_longitudes = len(longitudes)
4076    number_of_times = len(os.listdir(elevation_dir))
4077    number_of_points = number_of_latitudes * number_of_longitudes
4078    number_of_volumes = (number_of_latitudes-1) * (number_of_longitudes-1) * 2
4079
4080    #Work out the times
4081    if len(elevation_files) > 1:
4082        # Assume: The time period is less than 24hrs.
4083        time_period = (int(elevation_files[1][-3:]) \
4084                       - int(elevation_files[0][-3:])) * 60*60
4085        times = [x*time_period for x in range(len(elevation_files))]
4086    else:
4087        times = [0.0]
4088
4089    if verbose:
4090        print '------------------------------------------------'
4091        print 'Statistics:'
4092        print '  Extent (lat/lon):'
4093        print '    lat in [%f, %f], len(lat) == %d' \
4094              % (min(latitudes), max(latitudes), len(latitudes))
4095        print '    lon in [%f, %f], len(lon) == %d' \
4096              % (min(longitudes), max(longitudes), len(longitudes))
4097        print '    t in [%f, %f], len(t) == %d' \
4098              % (min(times), max(times), len(times))
4099
4100    ######### WRITE THE SWW FILE #############
4101
4102    # NetCDF file definition
4103    outfile = NetCDFFile(sww_file, netcdf_mode_w)
4104
4105    #Create new file
4106    outfile.institution = 'Geoscience Australia'
4107    outfile.description = 'Converted from XXX'
4108
4109    #For sww compatibility
4110    outfile.smoothing = 'Yes'
4111    outfile.order = 1
4112
4113    #Start time in seconds since the epoch (midnight 1/1/1970)
4114    outfile.starttime = starttime = times[0]
4115
4116    # dimension definitions
4117    outfile.createDimension('number_of_volumes', number_of_volumes)
4118    outfile.createDimension('number_of_vertices', 3)
4119    outfile.createDimension('number_of_points', number_of_points)
4120    outfile.createDimension('number_of_timesteps', number_of_times)
4121
4122    # variable definitions
4123    outfile.createVariable('x', precision, ('number_of_points',))
4124    outfile.createVariable('y', precision, ('number_of_points',))
4125    outfile.createVariable('elevation', precision, ('number_of_points',))
4126
4127    #FIXME: Backwards compatibility
4128    outfile.createVariable('z', precision, ('number_of_points',))
4129    #################################
4130
4131    outfile.createVariable('volumes', num.Int, ('number_of_volumes',
4132                                                'number_of_vertices'))
4133
4134    outfile.createVariable('time', precision, ('number_of_timesteps',))
4135
4136    outfile.createVariable('stage', precision, ('number_of_timesteps',
4137                                                'number_of_points'))
4138
4139    outfile.createVariable('xmomentum', precision, ('number_of_timesteps',
4140                                                    'number_of_points'))
4141
4142    outfile.createVariable('ymomentum', precision, ('number_of_timesteps',
4143                                                    'number_of_points'))
4144
4145    #Store
4146    from anuga.coordinate_transforms.redfearn import redfearn
4147
4148    x = num.zeros(number_of_points, num.Float)  #Easting
4149    y = num.zeros(number_of_points, num.Float)  #Northing
4150
4151    if verbose: print 'Making triangular grid'
4152
4153    #Get zone of 1st point.
4154    refzone, _, _ = redfearn(latitudes[0], longitudes[0])
4155
4156    vertices = {}
4157    i = 0
4158    for k, lat in enumerate(latitudes):
4159        for l, lon in enumerate(longitudes):
4160            vertices[l,k] = i
4161
4162            zone, easting, northing = redfearn(lat, lon)
4163
4164            #msg = 'Zone boundary crossed at longitude =', lon
4165            #assert zone == refzone, msg
4166            #print '%7.2f %7.2f %8.2f %8.2f' %(lon, lat, easting, northing)
4167            x[i] = easting
4168            y[i] = northing
4169            i += 1
4170
4171    #Construct 2 triangles per 'rectangular' element
4172    volumes = []
4173    for l in range(number_of_longitudes-1):    #X direction
4174        for k in range(number_of_latitudes-1): #Y direction
4175            v1 = vertices[l,k+1]
4176            v2 = vertices[l,k]
4177            v3 = vertices[l+1,k+1]
4178            v4 = vertices[l+1,k]
4179
            #Note, this is different to the ferret2sww code
4181            #since the order of the lats is reversed.
4182            volumes.append([v1,v3,v2]) #Upper element
4183            volumes.append([v4,v2,v3]) #Lower element
4184
4185    volumes = num.array(volumes, num.Int)      #array default#
4186
4187    geo_ref = Geo_reference(refzone, min(x), min(y))
4188    geo_ref.write_NetCDF(outfile)
4189
4190    # This will put the geo ref in the middle
    #geo_ref = Geo_reference(refzone, (max(x)+min(x))/2., (max(y)+min(y))/2.)
4192
4193    if verbose:
4194        print '------------------------------------------------'
4195        print 'More Statistics:'
        print '  Extent (x/y):'
        print '    x in [%f, %f], len(x) == %d' \
              % (min(x), max(x), len(x))
        print '    y in [%f, %f], len(y) == %d' \
              % (min(y), max(y), len(y))
4201        print 'geo_ref: ', geo_ref
4202
4203    z = num.resize(bath_grid,outfile.variables['z'][:].shape)
4204    outfile.variables['x'][:] = x - geo_ref.get_xllcorner()
4205    outfile.variables['y'][:] = y - geo_ref.get_yllcorner()
4206# FIXME (Ole): Remove once viewer has been recompiled and changed
4207#              to use elevation instead of z
4208    outfile.variables['z'][:] = z
4209    outfile.variables['elevation'][:] = z
4210    outfile.variables['volumes'][:] = volumes.astype(num.Int32) # On Opteron 64
4211
4212    stage = outfile.variables['stage']
4213    xmomentum = outfile.variables['xmomentum']
4214    ymomentum = outfile.variables['ymomentum']
4215
4216    outfile.variables['time'][:] = times   #Store time relative
4217
4218    if verbose: print 'Converting quantities'
4219
4220    n = number_of_times
4221    for j in range(number_of_times):
4222        # load in files
4223        elevation_meta, elevation_grid = \
4224            _read_asc(elevation_dir + os.sep + elevation_files[j])
4225
4226        _, u_momentum_grid = _read_asc(ucur_dir + os.sep + ucur_files[j])
4227        _, v_momentum_grid = _read_asc(vcur_dir + os.sep + vcur_files[j])
4228
4229        #cut matrix to desired size
4230        elevation_grid = elevation_grid[kmin:kmax,lmin:lmax]
4231        u_momentum_grid = u_momentum_grid[kmin:kmax,lmin:lmax]
4232        v_momentum_grid = v_momentum_grid[kmin:kmax,lmin:lmax]
4233
4234        # handle missing values
4235        missing = (elevation_grid == elevation_meta['NODATA_value'])
4236        if num.sometrue (missing):
4237            if fail_on_NaN:
4238                msg = 'File %s contains missing values' \
4239                      % (elevation_files[j])
4240                raise DataMissingValuesError, msg
4241            else:
4242                elevation_grid = elevation_grid*(missing==0) \
4243                                 + missing*elevation_NaN_filler
4244
4245        if verbose and j % ((n+10)/10) == 0: print '  Doing %d of %d' % (j, n)
4246
4247        i = 0
4248        for k in range(number_of_latitudes):      #Y direction
4249            for l in range(number_of_longitudes): #X direction
4250                w = zscale*elevation_grid[k,l] + mean_stage
4251                stage[j,i] = w
4252                h = w - z[i]
4253                xmomentum[j,i] = u_momentum_grid[k,l]*h
4254                ymomentum[j,i] = v_momentum_grid[k,l]*h
4255                i += 1
4256
4257    outfile.close()
4258
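# Usage sketch (illustrative; directory names are hypothetical):
#
#     asc_csiro2sww('bath', 'elevation', 'ucur', 'vcur', 'out.sww',
#                   minlat=-38.0, maxlat=-37.0,
#                   minlon=148.0, maxlon=149.0,
#                   verbose=True)
#
# where each directory holds ESRI ascii grids sharing the same metadata,
# with a common base name and numeric time-step extensions (.000, .001, ...).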
4259
4260##
4261# @brief Return max&min indexes (for slicing) of area specified.
# @param latitudes_ref Array of latitudes describing the grid.
# @param longitudes_ref Array of longitudes describing the grid.
4264# @param minlat Minimum latitude of specified area.
4265# @param maxlat Maximum latitude of specified area.
4266# @param minlon Minimum longitude of specified area.
4267# @param maxlon Maximum longitude of specified area.
4268# @return Tuple (lat_min_index, lat_max_index, lon_min_index, lon_max_index)
4269def _get_min_max_indexes(latitudes_ref,longitudes_ref,
4270                         minlat=None, maxlat=None,
4271                         minlon=None, maxlon=None):
4272    """
4273    Return max, min indexes (for slicing) of the lat and long arrays to cover
4274    the area specified with min/max lat/long.
4275
4276    Think of the latitudes and longitudes describing a 2d surface.
4277    The area returned is, if possible, just big enough to cover the
    input max/min area. (This will not be possible if the max/min area
    has a section outside of the latitudes/longitudes area.)

    Assumes longitudes are sorted from low to high (west to east,
    e.g. 148 - 151) and latitudes are sorted, ascending or descending.
4284    """
4285
4286    latitudes = latitudes_ref[:]
4287    longitudes = longitudes_ref[:]
4288
4289    latitudes = ensure_numeric(latitudes)
4290    longitudes = ensure_numeric(longitudes)
4291
4292    assert num.allclose(num.sort(longitudes), longitudes)
4293
4294    #print latitudes[0],longitudes[0]
4295    #print len(latitudes),len(longitudes)
4296    #print latitudes[len(latitudes)-1],longitudes[len(longitudes)-1]
4297
4298    lat_ascending = True
4299    if not num.allclose(num.sort(latitudes), latitudes):
4300        lat_ascending = False
4301        # reverse order of lat, so it's in ascending order
4302        latitudes = latitudes[::-1]
4303        assert num.allclose(num.sort(latitudes), latitudes)
4304
4305    largest_lat_index = len(latitudes)-1
4306
4307    #Cut out a smaller extent.
4308    if minlat == None:
4309        lat_min_index = 0
4310    else:
4311        lat_min_index = num.searchsorted(latitudes, minlat)-1
4312        if lat_min_index <0:
4313            lat_min_index = 0
4314
4315    if maxlat == None:
4316        lat_max_index = largest_lat_index #len(latitudes)
4317    else:
4318        lat_max_index = num.searchsorted(latitudes, maxlat)
4319        if lat_max_index > largest_lat_index:
4320            lat_max_index = largest_lat_index
4321
4322    if minlon == None:
4323        lon_min_index = 0
4324    else:
4325        lon_min_index = num.searchsorted(longitudes, minlon)-1
4326        if lon_min_index <0:
4327            lon_min_index = 0
4328
4329    if maxlon == None:
4330        lon_max_index = len(longitudes)
4331    else:
4332        lon_max_index = num.searchsorted(longitudes, maxlon)
4333
    # Reversing the indexes, if the lat array is descending
4335    if lat_ascending is False:
4336        lat_min_index, lat_max_index = largest_lat_index - lat_max_index, \
4337                                       largest_lat_index - lat_min_index
4338    lat_max_index = lat_max_index + 1 # taking into account how slicing works
4339    lon_max_index = lon_max_index + 1 # taking into account how slicing works
4340
4341    return lat_min_index, lat_max_index, lon_min_index, lon_max_index
4342
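# Worked example: with latitudes == [-36, -35, -34] (ascending) and
# longitudes == [148, 149, 150, 151], the call
#
#     _get_min_max_indexes(latitudes, longitudes,
#                          minlat=-35.5, maxlat=-34.5,
#                          minlon=149.2, maxlon=149.8)
#
# returns (0, 3, 1, 3), i.e. latitudes[0:3] and longitudes[1:3] - the
# smallest slices that still cover the requested area.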
4343
4344##
4345# @brief Read an ASC file.
# @param filename Path to the file to read.
4347# @param verbose True if this function is to be verbose.
4348def _read_asc(filename, verbose=False):
4349    """Read esri file from the following ASCII format (.asc)
4350
4351    Example:
4352
4353    ncols         3121
4354    nrows         1800
4355    xllcorner     722000
4356    yllcorner     5893000
4357    cellsize      25
4358    NODATA_value  -9999
4359    138.3698 137.4194 136.5062 135.5558 ..........
4360    """
4361
4362    datafile = open(filename)
4363
4364    if verbose: print 'Reading DEM from %s' % filename
4365
4366    lines = datafile.readlines()
4367    datafile.close()
4368
4369    if verbose: print 'Got', len(lines), ' lines'
4370
4371    ncols = int(lines.pop(0).split()[1].strip())
4372    nrows = int(lines.pop(0).split()[1].strip())
4373    xllcorner = float(lines.pop(0).split()[1].strip())
4374    yllcorner = float(lines.pop(0).split()[1].strip())
4375    cellsize = float(lines.pop(0).split()[1].strip())
4376    NODATA_value = float(lines.pop(0).split()[1].strip())
4377
4378    assert len(lines) == nrows
4379
4380    #Store data
4381    grid = []
4382
4383    n = len(lines)
4384    for i, line in enumerate(lines):
4385        cells = line.split()
4386        assert len(cells) == ncols
4387        grid.append(num.array([float(x) for x in cells]))
4388    grid = num.array(grid)
4389
4390    return {'xllcorner':xllcorner,
4391            'yllcorner':yllcorner,
4392            'cellsize':cellsize,
4393            'NODATA_value':NODATA_value}, grid
4394
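# Example (illustrative; 'area.asc' is hypothetical):
#
#     metadata, grid = _read_asc('area.asc')
#     cellsize = metadata['cellsize']
#     nrows, ncols = grid.shape    # row 0 is the northernmost row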
4395
4396    ####  URS 2 SWW  ###
4397
4398# Definitions of various NetCDF dimension names, etc.
4399lon_name = 'LON'
4400lat_name = 'LAT'
4401time_name = 'TIME'
4402precision = num.Float # So if we want to change the precision its done here
4403
4404##
# @brief Class for a NetCDF data file writer.
4406class Write_nc:
4407    """Write an nc file.
4408
4409    Note, this should be checked to meet cdc netcdf conventions for gridded
4410    data. http://www.cdc.noaa.gov/cdc/conventions/cdc_netcdf_standard.shtml
4411    """
4412
4413    ##
4414    # @brief Instantiate a Write_nc instance.
4415    # @param quantity_name
4416    # @param file_name
4417    # @param time_step_count The number of time steps.
4418    # @param time_step The time_step size.
4419    # @param lon
4420    # @param lat
4421    def __init__(self,
4422                 quantity_name,
4423                 file_name,
4424                 time_step_count,
4425                 time_step,
4426                 lon,
4427                 lat):
4428        """Instantiate a Write_nc instance (NetCDF file writer).
4429
4430        time_step_count is the number of time steps.
4431        time_step is the time step size
4432
        pre-condition: quantity_name must be 'HA', 'UA' or 'VA'.
4434        """
4435
4436        self.quantity_name = quantity_name
4437        quantity_units = {'HA':'CENTIMETERS',
4438                          'UA':'CENTIMETERS/SECOND',
4439                          'VA':'CENTIMETERS/SECOND'}
4440
4441        multiplier_dic = {'HA':100.0,   # To convert from m to cm
4442                          'UA':100.0,   #             and m/s to cm/sec
4443                          'VA':-100.0}  # MUX files have positive x in the
4444                                        # Southern direction.  This corrects
4445                                        # for it, when writing nc files.
4446
4447        self.quantity_multiplier =  multiplier_dic[self.quantity_name]
4448
4449        #self.file_name = file_name
4450        self.time_step_count = time_step_count
4451        self.time_step = time_step
4452
4453        # NetCDF file definition
4454        self.outfile = NetCDFFile(file_name, netcdf_mode_w)
4455        outfile = self.outfile
4456
4457        #Create new file
4458        nc_lon_lat_header(outfile, lon, lat)
4459
4460        # TIME
4461        outfile.createDimension(time_name, None)
4462        outfile.createVariable(time_name, precision, (time_name,))
4463
4464        #QUANTITY
4465        outfile.createVariable(self.quantity_name, precision,
4466                               (time_name, lat_name, lon_name))
4467        outfile.variables[self.quantity_name].missing_value = -1.e+034
4468        outfile.variables[self.quantity_name].units = \
4469                                 quantity_units[self.quantity_name]
4470        outfile.variables[lon_name][:]= ensure_numeric(lon)
4471        outfile.variables[lat_name][:]= ensure_numeric(lat)
4472
4473        #Assume no one will be wanting to read this, while we are writing
4474        #outfile.close()
4475
4476    ##
4477    # @brief Write a time-step of quantity data.
4478    # @param quantity_slice The data to be stored for this time-step.
4479    def store_timestep(self, quantity_slice):
4480        """Write a time slice of quantity info
4481
4482        quantity_slice is the data to be stored at this time step
4483        """
4484
4485        # Get the variables
4486        time = self.outfile.variables[time_name]
4487        quantity = self.outfile.variables[self.quantity_name]
4488
        # Get index of the time slice to write
4490        i = len(time)
4491
4492        #Store time
4493        time[i] = i * self.time_step    #self.domain.time
4494        quantity[i,:] = quantity_slice * self.quantity_multiplier
4495
4496    ##
4497    # @brief Close file underlying the class instance.
4498    def close(self):
4499        self.outfile.close()
4500
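# Usage sketch (illustrative; values and file name are hypothetical):
# write an 'HA' file with two time steps on a 2x2 grid (heights in metres;
# store_timestep() applies the x100 multiplier, so the file holds cm):
#
#     nc = Write_nc('HA', 'tiny_ha.nc', time_step_count=2, time_step=0.5,
#                   lon=[150.0, 150.1], lat=[-35.1, -35.0])
#     nc.store_timestep(num.array([[0.0, 0.1], [0.2, 0.3]]))  # t == 0.0
#     nc.store_timestep(num.array([[0.1, 0.2], [0.3, 0.4]]))  # t == 0.5
#     nc.close()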
4501
4502##
4503# @brief Convert URS file to SWW file.
4504# @param basename_in Stem of the input filename.
4505# @param basename_out Stem of the output filename.
4506# @param verbose True if this function is to be verbose.
4507# @param remove_nc_files
4508# @param minlat Sets extent of area to be used.  If not supplied, full extent.
4509# @param maxlat Sets extent of area to be used.  If not supplied, full extent.
4510# @param minlon Sets extent of area to be used.  If not supplied, full extent.
4511# @param maxlon Sets extent of area to be used.  If not supplied, full extent.
4512# @param mint
4513# @param maxt
4514# @param mean_stage
4515# @param origin A 3-tuple with geo referenced UTM coordinates
4516# @param zscale
4517# @param fail_on_NaN
4518# @param NaN_filler
4519# @param elevation
4520# @note Also convert latitude and longitude to UTM. All coordinates are
4521#       assumed to be given in the GDA94 datum.
4522def urs2sww(basename_in='o', basename_out=None, verbose=False,
4523            remove_nc_files=True,
4524            minlat=None, maxlat=None,
4525            minlon=None, maxlon=None,
4526            mint=None, maxt=None,
4527            mean_stage=0,
4528            origin=None,
4529            zscale=1,
4530            fail_on_NaN=True,
4531            NaN_filler=0,
4532            elevation=None):
4533    """Convert a URS file to an SWW file.
4534    Convert URS C binary format for wave propagation to
4535    sww format native to abstract_2d_finite_volumes.
4536
4537    Specify only basename_in and read files of the form
4538    basefilename-z-mux2, basefilename-e-mux2 and
4539    basefilename-n-mux2 containing relative height,
4540    x-velocity and y-velocity, respectively.
4541
4542    Also convert latitude and longitude to UTM. All coordinates are
4543    assumed to be given in the GDA94 datum. The latitude and longitude
4544    information is for  a grid.
4545
    min's and max's: If omitted - full extent is used.
4547    To include a value min may equal it, while max must exceed it.
4548    Lat and lon are assumed to be in decimal degrees.
4549    NOTE: minlon is the most east boundary.
4550
4551    origin is a 3-tuple with geo referenced
4552    UTM coordinates (zone, easting, northing)
    It will be the origin of the sww file. This shouldn't be used,
    since all of anuga should be able to handle an arbitrary origin.

    URS C binary format has data organised as TIME, LONGITUDE, LATITUDE,
    which means that latitude is the fastest
    varying dimension (row major order, so to speak).

    In URS C binary the latitudes and longitudes are in ascending order.
    """
4562
4563    if basename_out == None:
4564        basename_out = basename_in
4565
4566    files_out = urs2nc(basename_in, basename_out)
4567
4568    ferret2sww(basename_out,
4569               minlat=minlat,
4570               maxlat=maxlat,
4571               minlon=minlon,
4572               maxlon=maxlon,
4573               mint=mint,
4574               maxt=maxt,
4575               mean_stage=mean_stage,
4576               origin=origin,
4577               zscale=zscale,
4578               fail_on_NaN=fail_on_NaN,
4579               NaN_filler=NaN_filler,
4580               inverted_bathymetry=True,
4581               verbose=verbose)
4582   
4583    if remove_nc_files:
4584        for file_out in files_out:
4585            os.remove(file_out)
4586
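# Usage sketch (illustrative; 'o' is the default basename): convert the
# three mux files to 'o.sww', keeping the intermediate nc files around:
#
#     urs2sww('o', mean_stage=0.0, remove_nc_files=False, verbose=True)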
4587
4588##
4589# @brief Convert 3 URS files back to 4 NC files.
4590# @param basename_in Stem of the input filenames.
# @param basename_out Stem of the output filenames.
# @note The names of the urs files must be:
4593#          [basename_in]-z-mux
4594#          [basename_in]-e-mux
4595#          [basename_in]-n-mux
4596def urs2nc(basename_in='o', basename_out='urs'):
4597    """Convert the 3 urs files to 4 nc files.
4598
    The names of the urs files must be:
4600    [basename_in]-z-mux
4601    [basename_in]-e-mux
4602    [basename_in]-n-mux
4603    """
4604
4605    files_in = [basename_in + WAVEHEIGHT_MUX_LABEL,
4606                basename_in + EAST_VELOCITY_LABEL,
4607                basename_in + NORTH_VELOCITY_LABEL]
4608    files_out = [basename_out + '_ha.nc',
4609                 basename_out + '_ua.nc',
4610                 basename_out + '_va.nc']
4611    quantities = ['HA', 'UA', 'VA']
4612
4613    #if os.access(files_in[0]+'.mux', os.F_OK) == 0 :
4614    for i, file_name in enumerate(files_in):
4615        if os.access(file_name, os.F_OK) == 0:
4616            if os.access(file_name + '.mux', os.F_OK) == 0 :
4617                msg = 'File %s does not exist or is not accessible' % file_name
4618                raise IOError, msg
4619            else:
                files_in[i] += '.mux'
                print "file_name", file_name
4622
4623    hashed_elevation = None
4624    for file_in, file_out, quantity in map(None, files_in,
4625                                           files_out,
4626                                           quantities):
4627        lonlatdep, lon, lat, depth = _binary_c2nc(file_in,
4628                                                  file_out,
4629                                                  quantity)
        if hashed_elevation is None:
4631            elevation_file = basename_out + '_e.nc'
4632            write_elevation_nc(elevation_file,
4633                               lon,
4634                               lat,
4635                               depth)
4636            hashed_elevation = myhash(lonlatdep)
4637        else:
4638            msg = "The elevation information in the mux files is inconsistent"
4639            assert hashed_elevation == myhash(lonlatdep), msg
4640
4641    files_out.append(elevation_file)
4642
4643    return files_out
4644
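# Example use of urs2nc() (a sketch; 'project' is a hypothetical stem and
# 'project-z-mux', 'project-e-mux' and 'project-n-mux' are assumed to exist):
#
#     nc_files = urs2nc(basename_in='project', basename_out='project')
#     # nc_files == ['project_ha.nc', 'project_ua.nc',
#     #              'project_va.nc', 'project_e.nc']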
4645
4646##
4647# @brief Convert a quantity URS file to a NetCDF file.
4648# @param file_in Path to input URS file.
4649# @param file_out Path to the output file.
4650# @param quantity Name of the quantity to be written to the output file.
4651# @return A tuple (lonlatdep, lon, lat, depth).
4652def _binary_c2nc(file_in, file_out, quantity):
4653    """Reads in a quantity urs file and writes a quantity nc file.
4654    Additionally, returns the depth and lat, long info,
4655    so it can be written to a file.
4656    """
4657
4658    columns = 3                           # long, lat , depth
4659    mux_file = open(file_in, 'rb')
4660
4661    # Number of points/stations
4662    (points_num,) = unpack('i', mux_file.read(4))
4663
4664    # nt, int - Number of time steps
4665    (time_step_count,) = unpack('i', mux_file.read(4))
4666
4667    #dt, float - time step, seconds
4668    (time_step,) = unpack('f', mux_file.read(4))
4669
4670    msg = "Bad data in the mux file."
4671    if points_num < 0:
4672        mux_file.close()
4673        raise ANUGAError, msg
4674    if time_step_count < 0:
4675        mux_file.close()
4676        raise ANUGAError, msg
4677    if time_step < 0:
4678        mux_file.close()
4679        raise ANUGAError, msg
4680
4681    lonlatdep = p_array.array('f')
4682    lonlatdep.read(mux_file, columns * points_num)
4683    lonlatdep = num.array(lonlatdep, typecode=num.Float)
4684    lonlatdep = num.reshape(lonlatdep, (points_num, columns))
4685
4686    lon, lat, depth = lon_lat2grid(lonlatdep)
4687    lon_sorted = list(lon)
4688    lon_sorted.sort()
4689
4690    if not lon == lon_sorted:
4691        msg = "Longitudes in mux file are not in ascending order"
4692        raise IOError, msg
4693
4694    lat_sorted = list(lat)
4695    lat_sorted.sort()
4696
4697# UNUSED?
4698##    if not lat == lat_sorted:
4699##        msg = "Latitudes in mux file are not in ascending order"
4700
4701    nc_file = Write_nc(quantity,
4702                       file_out,
4703                       time_step_count,
4704                       time_step,
4705                       lon,
4706                       lat)
4707
4708    for i in range(time_step_count):
4709        #Read in a time slice from mux file
4710        hz_p_array = p_array.array('f')
4711        hz_p_array.read(mux_file, points_num)
4712        hz_p = num.array(hz_p_array, typecode=num.Float)
4713        hz_p = num.reshape(hz_p, (len(lon), len(lat)))
4714        hz_p = num.transpose(hz_p)  # mux has lat varying fastest, nc has long v.f.
4715
4716        #write time slice to nc file
4717        nc_file.store_timestep(hz_p)
4718
4719    mux_file.close()
4720    nc_file.close()
4721
4722    return lonlatdep, lon, lat, depth
4723
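##
# @brief Illustrative sketch only - not part of the original module API.
# @note Shows just the header layout that _binary_c2nc() above relies on:
#       an int station count, an int timestep count and a float timestep.
def _example_read_mux_header(file_in):
    """Return (points_num, time_step_count, time_step) from a mux file.

    A minimal sketch assuming the same binary layout read by _binary_c2nc().
    """

    mux_file = open(file_in, 'rb')
    try:
        (points_num,) = unpack('i', mux_file.read(4))       # number of stations
        (time_step_count,) = unpack('i', mux_file.read(4))  # nt
        (time_step,) = unpack('f', mux_file.read(4))        # dt, seconds
    finally:
        mux_file.close()

    return points_num, time_step_count, time_step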
4724
4725##
4726# @brief Write an NC elevation file.
4727# @param file_out Path to the output file.
# @param lon A list/array of the longitudes.
# @param lat A list/array of the latitudes.
4730# @param depth_vector The elevation data to write.
4731def write_elevation_nc(file_out, lon, lat, depth_vector):
4732    """Write an nc elevation file."""
4733
4734    # NetCDF file definition
4735    outfile = NetCDFFile(file_out, netcdf_mode_w)
4736
4737    #Create new file
4738    nc_lon_lat_header(outfile, lon, lat)
4739
4740    # ELEVATION
4741    zname = 'ELEVATION'
4742    outfile.createVariable(zname, precision, (lat_name, lon_name))
4743    outfile.variables[zname].units = 'CENTIMETERS'
4744    outfile.variables[zname].missing_value = -1.e+034
4745
4746    outfile.variables[lon_name][:] = ensure_numeric(lon)
4747    outfile.variables[lat_name][:] = ensure_numeric(lat)
4748
4749    depth = num.reshape(depth_vector, (len(lat), len(lon)))
4750    outfile.variables[zname][:] = depth
4751
4752    outfile.close()
4753
4754
4755##
4756# @brief Write lat/lon headers to a NetCDF file.
4757# @param outfile Handle to open file to write to.
4758# @param lon An iterable of the longitudes.
4759# @param lat An iterable of the latitudes.
4760# @note Defines lat/long dimensions and variables. Sets various attributes:
4761#          .point_spacing  and  .units
4762#       and writes lat/lon data.
4763
4764def nc_lon_lat_header(outfile, lon, lat):
4765    """Write lat/lon headers to a NetCDF file.
4766
4767    outfile is the netcdf file handle.
4768    lon - a list/array of the longitudes
4769    lat - a list/array of the latitudes
4770    """
4771
4772    outfile.institution = 'Geoscience Australia'
4773    outfile.description = 'Converted from URS binary C'
4774
4775    # Longitude
4776    outfile.createDimension(lon_name, len(lon))
4777    outfile.createVariable(lon_name, precision, (lon_name,))
4778    outfile.variables[lon_name].point_spacing = 'uneven'
4779    outfile.variables[lon_name].units = 'degrees_east'
4780    outfile.variables[lon_name].assignValue(lon)
4781
4782    # Latitude
4783    outfile.createDimension(lat_name, len(lat))
4784    outfile.createVariable(lat_name, precision, (lat_name,))
4785    outfile.variables[lat_name].point_spacing = 'uneven'
4786    outfile.variables[lat_name].units = 'degrees_north'
4787    outfile.variables[lat_name].assignValue(lat)
4788
4789
4790##
# @brief Get the longitudes and latitudes of a grid of points.
# @param long_lat_dep An array where each row is (longitude, latitude, quantity).
4793# @return A tuple (long, lat, quantity).
4794# @note The latitude is the fastest varying dimension - in mux files.
4795def lon_lat2grid(long_lat_dep):
    """
    Given a list of points that are assumed to be on a grid,
    return the longs and lats of the grid.
4799    long_lat_dep is an array where each row is a position.
4800    The first column is longitudes.
4801    The second column is latitudes.
4802
4803    The latitude is the fastest varying dimension - in mux files
4804    """
4805
4806    LONG = 0
4807    LAT = 1
4808    QUANTITY = 2
4809
4810    long_lat_dep = ensure_numeric(long_lat_dep, num.Float)
4811
4812    num_points = long_lat_dep.shape[0]
4813    this_rows_long = long_lat_dep[0,LONG]
4814
    # Count the number of unique latitudes
    # (check i first to avoid indexing past the end of the array)
    i = 0
    while i < num_points and long_lat_dep[i,LONG] == this_rows_long:
        i += 1
4819
    # Determine the lats and longs from the grid
4821    lat = long_lat_dep[:i, LAT]
4822    long = long_lat_dep[::i, LONG]
4823
4824    lenlong = len(long)
4825    lenlat = len(lat)
4826
4827    msg = 'Input data is not gridded'
4828    assert num_points % lenlat == 0, msg
4829    assert num_points % lenlong == 0, msg
4830
4831    # Test that data is gridded
4832    for i in range(lenlong):
4833        msg = 'Data is not gridded.  It must be for this operation'
4834        first = i * lenlat
4835        last = first + lenlat
4836
4837        assert num.allclose(long_lat_dep[first:last,LAT], lat), msg
4838        assert num.allclose(long_lat_dep[first:last,LONG], long[i]), msg
4839
4840    msg = 'Out of range latitudes/longitudes'
    for l in lat:
        assert -90 < l < 90, msg
    for l in long:
        assert -180 < l < 180, msg
4843
4844    # Changing quantity from lat being the fastest varying dimension to
4845    # long being the fastest varying dimension
4846    # FIXME - make this faster/do this a better way
4847    # use numeric transpose, after reshaping the quantity vector
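    # A vectorised equivalent of the loop below would be (untested sketch;
    # it relies on lat being the fastest varying dimension of the input):
    #
    #     quantity = num.ravel(num.transpose(
    #         num.reshape(long_lat_dep[:, QUANTITY], (lenlong, lenlat))))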
4848    quantity = num.zeros(num_points, num.Float)
4849
4850    for lat_i, _ in enumerate(lat):
4851        for long_i, _ in enumerate(long):
4852            q_index = lat_i*lenlong + long_i
4853            lld_index = long_i*lenlat + lat_i
4854            temp = long_lat_dep[lld_index, QUANTITY]
4855            quantity[q_index] = temp
4856
4857    return long, lat, quantity
4858
4859################################################################################
4860# END URS 2 SWW
4861################################################################################
4862
4863################################################################################
4864# URS UNGRIDDED 2 SWW
4865################################################################################
4866
4867### PRODUCING THE POINTS NEEDED FILE ###
4868
4869# Ones used for FESA 2007 results
4870#LL_LAT = -50.0
4871#LL_LONG = 80.0
4872#GRID_SPACING = 1.0/60.0
4873#LAT_AMOUNT = 4800
4874#LONG_AMOUNT = 3600
4875
4876
4877##
4878# @brief
4879# @param file_name
4880# @param boundary_polygon
4881# @param zone
4882# @param ll_lat
4883# @param ll_long
4884# @param grid_spacing
4885# @param lat_amount
4886# @param long_amount
4887# @param isSouthernHemisphere
4888# @param export_csv
4889# @param use_cache
4890# @param verbose True if this function is to be verbose.
4891# @return
4892def URS_points_needed_to_file(file_name, boundary_polygon, zone,
4893                              ll_lat, ll_long,
4894                              grid_spacing,
4895                              lat_amount, long_amount,
4896                              isSouthernHemisphere=True,
4897                              export_csv=False, use_cache=False,
4898                              verbose=False):
4899    """
4900    Given the info to replicate the URS grid and a polygon output
4901    a file that specifies the cloud of boundary points for URS.
4902
4903    This creates a .urs file.  This is in the format used by URS;
4904    1st line is the number of points,
    each line after represents a point, in lats and longs.
4906
4907    Note: The polygon cannot cross zones or hemispheres.
4908
    A workaround for different zones or hemispheres is to run this twice,
4910    once for each zone, and then combine the output.
4911
    file_name - name of the urs file produced for David.
    boundary_polygon - a list of points that describes a polygon.
                      The last point is assumed to join the first point.
                      This is in UTM (lat long would be better though)

    The following is info about the URS model that needs to be input.

    ll_lat - lower left latitude, in decimal degrees
    ll_long - lower left longitude, in decimal degrees
    grid_spacing - in decimal degrees
    lat_amount - number of latitudes
    long_amount - number of longitudes

    Don't add the file extension.  It will be added.
4926    """
4927
4928    geo = URS_points_needed(boundary_polygon, zone, ll_lat, ll_long,
4929                            grid_spacing,
4930                            lat_amount, long_amount, isSouthernHemisphere,
4931                            use_cache, verbose)
4932
4933    if not file_name[-4:] == ".urs":
4934        file_name += ".urs"
4935
4936    geo.export_points_file(file_name, isSouthHemisphere=isSouthernHemisphere)
4937
4938    if export_csv:
4939        if file_name[-4:] == ".urs":
4940            file_name = file_name[:-4] + ".csv"
4941        geo.export_points_file(file_name)
4942
4943    return geo
4944
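# Example call (a sketch with hypothetical values; the commented FESA 2007
# constants above give an idea of realistic magnitudes):
#
#     URS_points_needed_to_file('boundary', polygon, zone=50,
#                               ll_lat=-50.0, ll_long=80.0,
#                               grid_spacing=1.0/60.0,
#                               lat_amount=4800, long_amount=3600,
#                               export_csv=True)
#
# This would write 'boundary.urs' and 'boundary.csv'.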
4945
4946##
# @brief Get the cloud of URS grid points needed to cover a polygon.
4948# @param boundary_polygon
4949# @param zone
4950# @param ll_lat
4951# @param ll_long
4952# @param grid_spacing
4953# @param lat_amount
4954# @param long_amount
4955# @param isSouthHemisphere
4956# @param use_cache
4957# @param verbose
4958def URS_points_needed(boundary_polygon, zone, ll_lat,
4959                      ll_long, grid_spacing,
4960                      lat_amount, long_amount, isSouthHemisphere=True,
4961                      use_cache=False, verbose=False):
4962    args = (boundary_polygon,
4963            zone, ll_lat,
4964            ll_long, grid_spacing,
4965            lat_amount, long_amount, isSouthHemisphere)
4966    kwargs = {}
4967
4968    if use_cache is True:
4969        try:
4970            from anuga.caching import cache
        except:
            msg = 'Caching was requested, but caching module ' \
                  'could not be imported'
            raise Exception, msg
4975
4976        geo = cache(_URS_points_needed,
4977                    args, kwargs,
4978                    verbose=verbose,
4979                    compression=False)
4980    else:
        geo = _URS_points_needed(*args, **kwargs)
4982
4983    return geo
4984
4985
4986##
# @brief Worker for URS_points_needed(); computes the point cloud.
4988# @param boundary_polygon An iterable of points that describe a polygon.
4989# @param zone
4990# @param ll_lat Lower left latitude, in decimal degrees
4991# @param ll_long Lower left longitude, in decimal degrees
4992# @param grid_spacing Grid spacing in decimal degrees.
4993# @param lat_amount
4994# @param long_amount
4995# @param isSouthHemisphere
4996def _URS_points_needed(boundary_polygon,
4997                       zone, ll_lat,
4998                       ll_long, grid_spacing,
4999                       lat_amount, long_amount,
5000                       isSouthHemisphere):
5001    """
5002    boundary_polygon - a list of points that describes a polygon.
                      The last point is assumed to join the first point.
                      This is in UTM (lat long would be better though)
5005
5006    ll_lat - lower left latitude, in decimal degrees
5007    ll-long - lower left longitude, in decimal degrees
5008    grid_spacing - in decimal degrees
5009
5010    """
5011
5012    msg = "grid_spacing can not be zero"
    assert grid_spacing != 0, msg
5014
5015    a = boundary_polygon
5016
5017    # List of segments.  Each segment is two points.
5018    segs = [i and [a[i-1], a[i]] or [a[len(a)-1], a[0]] for i in range(len(a))]
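    # (For i == 0 the 'i and ... or ...' expression falls through to the
    # closing segment [a[-1], a[0]], so the polygon is treated as closed.)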
5019
5020    # convert the segs to Lat's and longs.
5021    # Don't assume the zone of the segments is the same as the lower left
5022    # corner of the lat long data!!  They can easily be in different zones
5023    lat_long_set = frozenset()
5024    for seg in segs:
5025        points_lat_long = points_needed(seg, ll_lat, ll_long, grid_spacing,
5026                                        lat_amount, long_amount, zone,
5027                                        isSouthHemisphere)
5028        lat_long_set |= frozenset(points_lat_long)
5029
5030    if lat_long_set == frozenset([]):
5031        msg = "URS region specified and polygon does not overlap."
5032        raise ValueError, msg
5033
5034    # Warning there is no info in geospatial saying the hemisphere of
5035    # these points.  There should be.
5036    geo = Geospatial_data(data_points=list(lat_long_set),
5037                          points_are_lats_longs=True)
5038
5039    return geo
5040
5041
5042##
# @brief Get the points that are needed to interpolate any point on a segment.
5044# @param seg Two points in the UTM.
5045# @param ll_lat Lower left latitude, in decimal degrees
5046# @param ll_long Lower left longitude, in decimal degrees
5047# @param grid_spacing
5048# @param lat_amount
5049# @param long_amount
5050# @param zone
5051# @param isSouthHemisphere
5052# @return A list of points.
5053def points_needed(seg, ll_lat, ll_long, grid_spacing,
5054                  lat_amount, long_amount, zone,
5055                  isSouthHemisphere):
5056    """
5057    seg is two points, in UTM
5058    return a list of the points, in lats and longs that are needed to
5059    interpolate any point on the segment.
5060    """
5061
5062    from math import sqrt
5063
5064    geo_reference = Geo_reference(zone=zone)
5065    geo = Geospatial_data(seg, geo_reference=geo_reference)
5066    seg_lat_long = geo.get_data_points(as_lat_long=True,
5067                                       isSouthHemisphere=isSouthHemisphere)
5068
5069    # 1.415 = 2^0.5, rounded up....
5070    sqrt_2_rounded_up = 1.415
5071    buffer = sqrt_2_rounded_up * grid_spacing
5072
5073    max_lat = max(seg_lat_long[0][0], seg_lat_long[1][0]) + buffer
5074    max_long = max(seg_lat_long[0][1], seg_lat_long[1][1]) + buffer
5075    min_lat = min(seg_lat_long[0][0], seg_lat_long[1][0]) - buffer
5076    min_long = min(seg_lat_long[0][1], seg_lat_long[1][1]) - buffer
5077
5078    first_row = (min_long - ll_long) / grid_spacing
5079
5080    # To round up
5081    first_row_long = int(round(first_row + 0.5))
5082
5083    last_row = (max_long - ll_long) / grid_spacing # round down
5084    last_row_long = int(round(last_row))
5085
5086    first_row = (min_lat - ll_lat) / grid_spacing
5087    # To round up
5088    first_row_lat = int(round(first_row + 0.5))
5089
5090    last_row = (max_lat - ll_lat) / grid_spacing # round down
5091    last_row_lat = int(round(last_row))
5092
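    # The constant below is undocumented in the original source; it appears
    # to be roughly sqrt(2) * 111.1 km, i.e. one degree of latitude in
    # metres stretched along the grid diagonal (an assumption).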
5093    max_distance = 157147.4112 * grid_spacing
5094    points_lat_long = []
5095
5096    # Create a list of the lat long points to include.
5097    for index_lat in range(first_row_lat, last_row_lat + 1):
5098        for index_long in range(first_row_long, last_row_long + 1):
5099            lat = ll_lat + index_lat*grid_spacing
5100            long = ll_long + index_long*grid_spacing
5101
5102            #filter here to keep good points
5103            if keep_point(lat, long, seg, max_distance):
5104                points_lat_long.append((lat, long)) #must be hashable
5105
    # Points further than max_distance from the segment were filtered out above.
5107    return points_lat_long
5108
5109
5110##
# @brief Decide whether a lat/long point is within max_distance of a segment.
5112# @param lat
5113# @param long
5114# @param seg Two points in UTM.
5115# @param max_distance
5116def keep_point(lat, long, seg, max_distance):
5117    """
5118    seg is two points, UTM
5119    """
5120
5121    from math import sqrt
5122
5123    _ , x0, y0 = redfearn(lat, long)
5124    x1 = seg[0][0]
5125    y1 = seg[0][1]
5126    x2 = seg[1][0]
5127    y2 = seg[1][1]
5128    x2_1 = x2-x1
5129    y2_1 = y2-y1
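    # d below is the perpendicular distance from (x0, y0) to the infinite
    # line through seg; a ZeroDivisionError means the segment is degenerate
    # (both endpoints coincide).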
5130    try:
5131        d = abs((x2_1)*(y1-y0)-(x1-x0)*(y2_1))/sqrt( \
5132            (x2_1)*(x2_1)+(y2_1)*(y2_1))
5133    except ZeroDivisionError:
5134        if sqrt((x2_1)*(x2_1)+(y2_1)*(y2_1)) == 0 \
5135           and abs((x2_1)*(y1-y0)-(x1-x0)*(y2_1)) == 0:
5136            return True
5137        else:
5138            return False
5139
5140    return d <= max_distance
5141
5142################################################################################
5143# CONVERTING UNGRIDDED URS DATA TO AN SWW FILE
5144################################################################################
5145
5146WAVEHEIGHT_MUX_LABEL = '-z-mux'
5147EAST_VELOCITY_LABEL =  '-e-mux'
5148NORTH_VELOCITY_LABEL =  '-n-mux'
5149
5150##
5151# @brief Convert URS file(s) (wave prop) to an SWW file.
5152# @param basename_in Stem of the input filenames.
5153# @param basename_out Path to the output SWW file.
5154# @param verbose True if this function is to be verbose.
5155# @param mint
5156# @param maxt
5157# @param mean_stage
5158# @param origin Tuple with geo-ref UTM coordinates (zone, easting, northing).
5159# @param hole_points_UTM
5160# @param zscale
5161# @note Also convert latitude and longitude to UTM. All coordinates are
5162#       assumed to be given in the GDA94 datum.
5163# @note Input filename stem has suffixes '-z-mux', '-e-mux' and '-n-mux'
5164#       added for relative height, x-velocity and y-velocity respectively.
5165def urs_ungridded2sww(basename_in='o', basename_out=None, verbose=False,
5166                      mint=None, maxt=None,
5167                      mean_stage=0,
5168                      origin=None,
5169                      hole_points_UTM=None,
5170                      zscale=1):
5171    """
5172    Convert URS C binary format for wave propagation to
5173    sww format native to abstract_2d_finite_volumes.
5174
5175    Specify only basename_in and read files of the form
5176    basefilename-z-mux, basefilename-e-mux and
5177    basefilename-n-mux containing relative height,
5178    x-velocity and y-velocity, respectively.
5179
5180    Also convert latitude and longitude to UTM. All coordinates are
5181    assumed to be given in the GDA94 datum. The latitude and longitude
    information is assumed to be ungridded.

    min's and max's: If omitted - the full extent is used.
    To include a value min and max may equal it.
5186    Lat and lon are assumed to be in decimal degrees.
5187
5188    origin is a 3-tuple with geo referenced
5189    UTM coordinates (zone, easting, northing)
5190    It will be the origin of the sww file. This shouldn't be used,
    since all of anuga should be able to handle an arbitrary origin.
5192    The mux point info is NOT relative to this origin.
5193
5194    URS C binary format has data organised as TIME, LONGITUDE, LATITUDE
5195    which means that latitude is the fastest
5196    varying dimension (row major order, so to speak)
5197
    In URS C binary the latitudes and longitudes are in ascending order.
5199
5200    Note, interpolations of the resulting sww file will be different
5201    from results of urs2sww.  This is due to the interpolation
5202    function used, and the different grid structure between urs2sww
5203    and this function.
5204
5205    Interpolating data that has an underlying gridded source can
5206    easily end up with different values, depending on the underlying
5207    mesh.
5208
5209    consider these 4 points
5210    50  -50
5211
5212    0     0
5213
5214    The grid can be
5215     -
5216    |\|   A
5217     -
5218     or;
5219      -
5220     |/|  B
5221      -
5222
    If a point is just below the centre of the square, it will have a
5224    +ve value in grid A and a -ve value in grid B.
5225    """
5226
5227    from anuga.mesh_engine.mesh_engine import NoTrianglesError
5228    from anuga.pmesh.mesh import Mesh
5229
5230    files_in = [basename_in + WAVEHEIGHT_MUX_LABEL,
5231                basename_in + EAST_VELOCITY_LABEL,
5232                basename_in + NORTH_VELOCITY_LABEL]
5233    quantities = ['HA','UA','VA']
5234
5235    # instantiate urs_points of the three mux files.
5236    mux = {}
5237    for quantity, file in map(None, quantities, files_in):
5238        mux[quantity] = Urs_points(file)
5239
5240    # Could check that the depth is the same. (hashing)
5241
5242    # handle to a mux file to do depth stuff
5243    a_mux = mux[quantities[0]]
5244
5245    # Convert to utm
5246    lat = a_mux.lonlatdep[:,1]
5247    long = a_mux.lonlatdep[:,0]
5248    points_utm, zone = convert_from_latlon_to_utm(latitudes=lat,
5249                                                  longitudes=long)
5250
5251    elevation = a_mux.lonlatdep[:,2] * -1
5252
5253    # grid (create a mesh from the selected points)
    # This mesh has a problem.  Triangles are stretched over ungridded areas.
5255    # If these areas could be described as holes in pmesh, that would be great.
5256
    # I can't just get the user to select a point in the middle.
5258    # A boundary is needed around these points.
5259    # But if the zone of points is obvious enough auto-segment should do
5260    # a good boundary.
5261    mesh = Mesh()
5262    mesh.add_vertices(points_utm)
5263    mesh.auto_segment(smooth_indents=True, expand_pinch=True)
5264
5265    # To try and avoid alpha shape 'hugging' too much
5266    mesh.auto_segment(mesh.shape.get_alpha() * 1.1)
5267    if hole_points_UTM is not None:
5268        point = ensure_absolute(hole_points_UTM)
5269        mesh.add_hole(point[0], point[1])
5270
5271    try:
5272        mesh.generate_mesh(minimum_triangle_angle=0.0, verbose=False)
5273    except NoTrianglesError:
5274        # This is a bit of a hack, going in and changing the data structure.
5275        mesh.holes = []
5276        mesh.generate_mesh(minimum_triangle_angle=0.0, verbose=False)
5277
5278    mesh_dic = mesh.Mesh2MeshList()
5279
5280    #mesh.export_mesh_file(basename_in + '_168.tsh')
5281    #import sys; sys.exit()
5282    # These are the times of the mux file
5283    mux_times = []
5284    for i in range(a_mux.time_step_count):
5285        mux_times.append(a_mux.time_step * i)
5286    (mux_times_start_i, mux_times_fin_i) = mux2sww_time(mux_times, mint, maxt)
5287    times = mux_times[mux_times_start_i:mux_times_fin_i]
5288
5289    if mux_times_start_i == mux_times_fin_i:
5290        # Close the mux files
5291        for quantity, file in map(None, quantities, files_in):
5292            mux[quantity].close()
5293        msg = "Due to mint and maxt there's no time info in the boundary SWW."
5294        raise Exception, msg
5295
    # If this raise is removed there are currently no downstream errors
5297
    points_utm = ensure_numeric(points_utm)
    assert num.allclose(ensure_numeric(mesh_dic['generatedpointlist']),
                        points_utm)
5301
5302    volumes = mesh_dic['generatedtrianglelist']
5303
5304    # write sww intro and grid stuff.
5305    if basename_out is None:
5306        swwname = basename_in + '.sww'
5307    else:
5308        swwname = basename_out + '.sww'
5309
5310    if verbose: print 'Output to ', swwname
5311
5312    outfile = NetCDFFile(swwname, netcdf_mode_w)
5313
5314    # For a different way of doing this, check out tsh2sww
5315    # work out sww_times and the index range this covers
5316    sww = Write_sww()
5317    sww.store_header(outfile, times, len(volumes), len(points_utm),
5318                     verbose=verbose, sww_precision=num.Float)
5319    outfile.mean_stage = mean_stage
5320    outfile.zscale = zscale
5321
5322    sww.store_triangulation(outfile, points_utm, volumes,
5323                            elevation, zone,  new_origin=origin,
5324                            verbose=verbose)
5325
5326    if verbose: print 'Converting quantities'
5327
5328    # Read in a time slice from each mux file and write it to the SWW file
5329    j = 0
5330    for ha, ua, va in map(None, mux['HA'], mux['UA'], mux['VA']):
5331        if j >= mux_times_start_i and j < mux_times_fin_i:
5332            stage = zscale*ha + mean_stage
5333            h = stage - elevation
5334            xmomentum = ua*h
5335            ymomentum = -1 * va * h # -1 since in mux files south is positive.
5336            sww.store_quantities(outfile,
5337                                 slice_index=j-mux_times_start_i,
5338                                 verbose=verbose,
5339                                 stage=stage,
5340                                 xmomentum=xmomentum,
5341                                 ymomentum=ymomentum,
5342                                 sww_precision=num.Float)
5343        j += 1
5344
5345    if verbose: sww.verbose_quantities(outfile)
5346
5347    outfile.close()
5348
5349    # Do some conversions while writing the sww file
5350
5351
5352################################################################################
5353# READ MUX2 FILES line of points
5354################################################################################
5355
5356WAVEHEIGHT_MUX2_LABEL = '-z-mux2'
5357EAST_VELOCITY_MUX2_LABEL = '-e-mux2'
5358NORTH_VELOCITY_MUX2_LABEL = '-n-mux2'
5359
5360##
# @brief Read mux2 files and combine them as a weighted sum.
5362# @param filenames List of mux2 format input filenames.
5363# @param weights Weights associated with each source file.
5364# @param permutation The gauge numbers for which data is extracted.
5365# @param verbose True if this function is to be verbose.
5366# @return (times, latitudes, longitudes, elevation, quantity, starttime)
5367def read_mux2_py(filenames,
5368                 weights=None,
5369                 permutation=None,
5370                 verbose=False):
5371    """Access the mux files specified in the filenames list. Combine the
5372       data found therin as a weighted linear sum as specifed by the weights.
5373       If permutation is None or empty extract timeseries data for all gauges
5374       within the files.
5375
5376       Input:
5377           filenames:   List of filenames specifiying the file containing the
5378                        timeseries data (mux2 format) for each source
5379           weights:     Weighs associated with each source
5380                        (defaults to 1 for each source)
5381           permutation: Specifies the gauge numbers that for which data is to be
5382                        extracted
5383    """
5384
5385    from urs_ext import read_mux2
5386
5387    numSrc = len(filenames)
5388
5389    file_params = -1 * num.ones(3, num.Float)                    # [nsta,dt,nt]
5390
5391    # Convert verbose to int C flag
5392    if verbose:
5393        verbose=1
5394    else:
5395        verbose=0
5396
5397    if weights is None:
5398        weights = num.ones(numSrc)
5399
5400    if permutation is None:
5401        permutation = ensure_numeric([], num.Float)
5402
5403    # Call underlying C implementation urs2sts_ext.c
5404    data = read_mux2(numSrc, filenames, weights, file_params,
5405                     permutation, verbose)
5406
    msg = 'File parameter values were not read in correctly from C file'
    assert len(num.compress(file_params > 0, file_params)) != 0, msg

    msg = 'The number of stations specified in the C array and in the file ' \
          'are inconsistent'
5412    assert file_params[0] >= len(permutation), msg
5413
5414    msg = 'The number of stations returned is inconsistent with ' \
5415          'the requested number'
5416    assert len(permutation) == 0 or len(permutation) == data.shape[0], msg
5417
5418    nsta = int(file_params[0])
5419    msg = 'Must have at least one station'
5420    assert nsta > 0, msg
5421
5422    dt = file_params[1]
    msg = 'Must have a positive timestep'
5424    assert dt > 0, msg
5425
5426    nt = int(file_params[2])
5427    msg = 'Must have at least one gauge value'
5428    assert nt > 0, msg
5429
5430    OFFSET = 5 # Number of site parameters p passed back with data
5431               # p = [geolat,geolon,depth,start_tstep,finish_tstep]
5432
5433    # FIXME (Ole): What is the relationship with params and data.shape ?
5434    # It looks as if the following asserts should pass but they don't always
5435    #
5436    #msg = 'nt = %d, data.shape[1] == %d' %(nt, data.shape[1])
5437    #assert nt == data.shape[1] - OFFSET, msg
5438    #
5439    #msg = 'nsta = %d, data.shape[0] == %d' %(nsta, data.shape[0])
5440    #assert nsta == data.shape[0], msg
5441
5442    # Number of stations in ordering file
5443    number_of_selected_stations = data.shape[0]
5444
5445    # Index where data ends and parameters begin
5446    parameters_index = data.shape[1] - OFFSET
5447
5448    times = dt * num.arange(parameters_index)
5449    latitudes = num.zeros(number_of_selected_stations, num.Float)
5450    longitudes = num.zeros(number_of_selected_stations, num.Float)
5451    elevation = num.zeros(number_of_selected_stations, num.Float)
5452    quantity = num.zeros((number_of_selected_stations, parameters_index), num.Float)
5453
5454    starttime = 1e16
5455    for i in range(number_of_selected_stations):
5456        quantity[i][:] = data[i][:parameters_index]
5457        latitudes[i] = data[i][parameters_index]
5458        longitudes[i] = data[i][parameters_index+1]
5459        elevation[i] = -data[i][parameters_index+2]
5460        first_time_step = data[i][parameters_index+3]
5461        starttime = min(dt*first_time_step, starttime)
5462
5463    return times, latitudes, longitudes, elevation, quantity, starttime
5464
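# Example call (a sketch with hypothetical file stems; two '-z-mux2'
# sources combined with equal weights):
#
#     times, lats, longs, elev, stage, t0 = read_mux2_py(
#         ['sourceA-z-mux2', 'sourceB-z-mux2'],
#         weights=num.array([0.5, 0.5]))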
5465
5466##
# @brief Find start and end indices into mux_times bounded by mint and maxt.
# @param mux_times A sequence of the mux file times.
# @param mint Minimum time of interest (None means start from the beginning).
# @param maxt Maximum time of interest (None means go to the end).
# @return Tuple (start_index, end_index) for slicing mux_times.
5472def mux2sww_time(mux_times, mint, maxt):
5473    """
5474    """
5475
5476    if mint == None:
5477        mux_times_start_i = 0
5478    else:
5479        mux_times_start_i = num.searchsorted(mux_times, mint)
5480
5481    if maxt == None:
5482        mux_times_fin_i = len(mux_times)
5483    else:
5484        maxt += 0.5 # so if you specify a time where there is
5485                    # data that time will be included
5486        mux_times_fin_i = num.searchsorted(mux_times, maxt)
5487
5488    return mux_times_start_i, mux_times_fin_i
5489
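# Example: with mux_times == [0.0, 10.0, 20.0, 30.0], mint=5 and maxt=20,
# mux2sww_time() returns (1, 3), i.e. the slice [10.0, 20.0].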
5490
5491##
5492# @brief Convert a URS (mux2, wave propagation) file to an STS file.
5493# @param basename_in String (or list) of source file stems.
5494# @param basename_out Stem of output STS file path.
5495# @param weights
5496# @param verbose True if this function is to be verbose.
# @param origin Tuple with geo-ref UTM coords (zone, easting, northing).
5498# @param zone
5499# @param mean_stage
5500# @param zscale
5501# @param ordering_filename Path of a file specifying which mux2 gauge points are
5502#                          to be stored.
5503# @note Also convert latitude and longitude to UTM. All coordinates are
5504#       assumed to be given in the GDA94 datum.
5505def urs2sts(basename_in, basename_out=None,
5506            weights=None,
5507            verbose=False,
5508            origin=None,
5509            zone=None,
5510            central_meridian=None,           
5511            mean_stage=0.0,
5512            zscale=1.0,
5513            ordering_filename=None):
5514    """Convert URS mux2 format for wave propagation to sts format
5515
5516    Also convert latitude and longitude to UTM. All coordinates are
5517    assumed to be given in the GDA94 datum
5518
5519    origin is a 3-tuple with geo referenced
5520    UTM coordinates (zone, easting, northing)
5521
5522    inputs:
5523
5524    basename_in: list of source file prefixes
5525
5526        These are combined with the extensions:
5527        WAVEHEIGHT_MUX2_LABEL = '-z-mux2' for stage
5528        EAST_VELOCITY_MUX2_LABEL = '-e-mux2' xmomentum
5529        NORTH_VELOCITY_MUX2_LABEL = '-n-mux2' and ymomentum
5530
        to create a 2D list of mux2 files. The rows are associated with each
        quantity and must have the above extensions;
        the columns are the list of file prefixes.

    ordering_filename: a .txt file name specifying which mux2 gauge points are
              to be stored. This is indicated by the index of the gauge
              in the ordering file.

              ordering file format:
              1st line:    'index,longitude,latitude\n'
              other lines: index,longitude,latitude

              If ordering_filename is None or the ordering file is empty
              then all points are taken in the order they
              appear in the mux2 file.

    output:
      basename_out: name of the sts file in which the mux2 data is stored.

    NOTE: South is positive in mux files so the sign of the y-component
          of velocity is reversed.
    """
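    # A minimal ordering file would look like (hypothetical gauge indices
    # and coordinates, for illustration only):
    #
    #     index,longitude,latitude
    #     0,115.8728,-31.9522
    #     1,115.8730,-31.9514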
5555
5556    import os
5557    from Scientific.IO.NetCDF import NetCDFFile
5558    from types import ListType,StringType
5559    from operator import __and__
5560
5561    if not isinstance(basename_in, ListType):
5562        if verbose: print 'Reading single source'
5563        basename_in = [basename_in]
5564
5565    # This is the value used in the mux file format to indicate NAN data
5566    # FIXME (Ole): This should be changed everywhere to IEEE NAN when
5567    #              we upgrade to Numpy
5568    NODATA = 99
5569
5570    # Check that basename is a list of strings
5571    if not reduce(__and__, map(lambda z:isinstance(z,StringType), basename_in)):
5572        msg= 'basename_in must be a string or list of strings'
5573        raise Exception, msg
5574
5575    # Find the number of sources to be used
5576    numSrc = len(basename_in)
5577
5578    # A weight must be specified for each source
5579    if weights is None:
5580        # Default is equal weighting
5581        weights = num.ones(numSrc, num.Float) / numSrc
5582    else:
5583        weights = ensure_numeric(weights)
5584        msg = 'When combining multiple sources must specify a weight for ' \
              'each mux2 source file'
5586        assert len(weights) == numSrc, msg
5587
5588    if verbose: print 'Weights used in urs2sts:', weights
5589
5590    # Check output filename
5591    if basename_out is None:
5592        msg = 'STS filename must be specified as basename_out ' \
5593              'in function urs2sts'
5594        raise Exception, msg
5595
5596    if basename_out.endswith('.sts'):
5597        stsname = basename_out
5598    else:
5599        stsname = basename_out + '.sts'
5600
5601    # Create input filenames from basenames and check their existence
5602    files_in = [[], [], []]
5603    for files in basename_in:
        files_in[0].append(files + WAVEHEIGHT_MUX2_LABEL)
5605        files_in[1].append(files + EAST_VELOCITY_MUX2_LABEL)
5606        files_in[2].append(files + NORTH_VELOCITY_MUX2_LABEL)
5607
5608    quantities = ['HA','UA','VA'] # Quantity names used in the MUX2 format
5609    for i in range(len(quantities)):
5610        for file_in in files_in[i]:
5611            if (os.access(file_in, os.R_OK) == 0):
5612                msg = 'File %s does not exist or is not accessible' % file_in
5613                raise IOError, msg
5614
5615    # Establish permutation array
5616    if ordering_filename is not None:
5617        if verbose is True: print 'Reading ordering file', ordering_filename
5618
5619        # Read ordering file
5620        try:
5621            fid = open(ordering_filename, 'r')
5622            file_header = fid.readline().split(',')
5623            ordering_lines = fid.readlines()
5624            fid.close()
5625        except:
5626            msg = 'Cannot open %s' % ordering_filename
5627            raise Exception, msg
5628
5629        reference_header = 'index, longitude, latitude\n'
5630        reference_header_split = reference_header.split(',')
5631        for i in range(3):
5632            if not file_header[i].strip() == reference_header_split[i].strip():
5633                msg = 'File must contain header: ' + reference_header
5634                raise Exception, msg
5635
5636        if len(ordering_lines) < 2:
5637            msg = 'File must contain at least two points'
5638            raise Exception, msg
5639
5640        permutation = [int(line.split(',')[0]) for line in ordering_lines]
5641        permutation = ensure_numeric(permutation)
5642    else:
5643        permutation = None
5644
5645    # Read MUX2 files
5646    if (verbose): print 'reading mux2 file'
5647
5648    mux={}
5649    for i, quantity in enumerate(quantities):
        # For each quantity read the associated list of source mux2 files with
        # the extension associated with that quantity
5652
5653        times, latitudes, longitudes, elevation, mux[quantity], starttime \
5654            = read_mux2_py(files_in[i], weights, permutation, verbose=verbose)
5655
5656        # Check that all quantities have consistent time and space information
5657        if quantity != quantities[0]:
5658            msg = '%s, %s and %s have inconsistent gauge data' \
5659                  % (files_in[0], files_in[1], files_in[2])
5660            assert num.allclose(times, times_old), msg
5661            assert num.allclose(latitudes, latitudes_old), msg
5662            assert num.allclose(longitudes, longitudes_old), msg
5663            assert num.allclose(elevation, elevation_old), msg
5664            assert num.allclose(starttime, starttime_old), msg
5665        times_old = times
5666        latitudes_old = latitudes
5667        longitudes_old = longitudes
5668        elevation_old = elevation
5669        starttime_old = starttime
5670
5671        # Self check - can be removed to improve speed
5672        #ref_longitudes = [float(line.split(',')[1]) for line in ordering_lines]
5673        #ref_latitudes = [float(line.split(',')[2]) for line in ordering_lines]
5674        #
5675        #msg = 'Longitudes specified in ordering file do not match those ' \
5676        #      'found in mux files. ' \
5677        #      'I got %s instead of %s (only beginning shown)' \
5678        #      % (str(longitudes[:10]) + '...',
5679        #         str(ref_longitudes[:10]) + '...')
5680        #assert allclose(longitudes, ref_longitudes), msg
5681        #
5682        #msg = 'Latitudes specified in ordering file do not match those ' \
5683        #      'found in mux files. '
5684        #      'I got %s instead of %s (only beginning shown)' \
5685        #      % (str(latitudes[:10]) + '...',
5686        #         str(ref_latitudes[:10]) + '...')
5687        #assert allclose(latitudes, ref_latitudes), msg
5688
5689    # Store timeseries in STS file
    msg = 'File is empty and/or clipped region not in file region'
    assert len(latitudes) > 0, msg
5692
5693    number_of_points = latitudes.shape[0]      # Number of stations retrieved
5694    number_of_times = times.shape[0]           # Number of timesteps
5695    number_of_latitudes = latitudes.shape[0]   # Number latitudes
5696    number_of_longitudes = longitudes.shape[0] # Number longitudes
5697
    # The permutation vector contains the original indices as given in the
    # ordering file, or None, in which case points are assigned the trivial
    # indices enumerating them from 0 to number_of_points-1
5702    if permutation is None:
5703        permutation = num.arange(number_of_points, typecode=num.Int)
5704
5705    # NetCDF file definition
5706    outfile = NetCDFFile(stsname, netcdf_mode_w)
5707
5708    description = 'Converted from URS mux2 files: %s' % basename_in
5709
5710    # Create new file
5711    sts = Write_sts()
5712    sts.store_header(outfile,
5713                     times+starttime,
5714                     number_of_points,
5715                     description=description,
5716                     verbose=verbose,
5717                     sts_precision=num.Float)
5718
5719    # Store
5720    from anuga.coordinate_transforms.redfearn import redfearn
5721
5722    x = num.zeros(number_of_points, num.Float)  # Easting
5723    y = num.zeros(number_of_points, num.Float)  # Northing
5724
5725    # Check zone boundaries
5726    if zone is None:
5727        refzone, _, _ = redfearn(latitudes[0], longitudes[0],
5728                                 central_meridian=central_meridian)
5729    else:
5730        refzone = zone
5731

    old_zone = refzone
5735
5736    for i in range(number_of_points):
5737        computed_zone, easting, northing = redfearn(latitudes[i], longitudes[i],
5738                                                    zone=zone,
5739                                                    central_meridian=central_meridian)
5740        x[i] = easting
5741        y[i] = northing
5742        if computed_zone != refzone:
5743            msg = 'All sts gauges need to be in the same zone. \n'
            msg += 'offending gauge: Zone %d, %.4f, %.4f\n' \
                   % (computed_zone, easting, northing)
            msg += 'previous gauge: Zone %d, %.4f, %.4f' \
                   % (old_zone, old_easting, old_northing)
5748            raise Exception, msg
5749        old_zone = computed_zone
5750        old_easting = easting
5751        old_northing = northing
5752
5753    if origin is None:
5754        origin = Geo_reference(refzone, min(x), min(y))
5755    geo_ref = write_NetCDF_georeference(origin, outfile)
5756
5757    elevation = num.resize(elevation, outfile.variables['elevation'][:].shape)
5758    outfile.variables['permutation'][:] = permutation.astype(num.Int32) # Opteron 64
5759    outfile.variables['x'][:] = x - geo_ref.get_xllcorner()
5760    outfile.variables['y'][:] = y - geo_ref.get_yllcorner()
5761    outfile.variables['elevation'][:] = elevation
5762
5763    stage = outfile.variables['stage']
5764    xmomentum = outfile.variables['xmomentum']
5765    ymomentum = outfile.variables['ymomentum']
5766
5767    if verbose: print 'Converting quantities'
5768
5769    for j in range(len(times)):
5770        for i in range(number_of_points):
5771            ha = mux['HA'][i,j]
5772            ua = mux['UA'][i,j]
5773            va = mux['VA'][i,j]
5774            if ha == NODATA:
5775                if verbose:
5776                    msg = 'Setting nodata value %d to 0 at time = %f, ' \
5777                          'point = %d' % (ha, times[j], i)
5778                    print msg
5779                ha = 0.0
5780                ua = 0.0
5781                va = 0.0
5782
5783            w = zscale*ha + mean_stage
5784            h = w - elevation[i]
5785            stage[j,i] = w
5786
5787            xmomentum[j,i] = ua * h
5788            ymomentum[j,i] = -va * h # South is positive in mux files
5789
5790
5791    outfile.close()
5792
5793
5794##
5795# @brief Create a list of points defining a boundary from an STS file.
5796# @param stsname Stem of path to the STS file to read.
5797# @return A list of boundary points.
5798def create_sts_boundary(stsname):
5799    """Create a list of points defining a boundary from an STS file.
5800
5801    Create boundary segments from .sts file. Points can be stored in
5802    arbitrary order within the .sts file. The order in which the .sts points
    make up the boundary is given in the order.txt file.
5804
5805    FIXME:
5806    Point coordinates are stored in relative eastings and northings.
5807    But boundary is produced in absolute coordinates
5808    """
5809
    try:
        fid = NetCDFFile(stsname + '.sts', netcdf_mode_r)
    except:
        msg = 'Cannot open %s.sts' % stsname
        raise Exception, msg
5815
5816    xllcorner = fid.xllcorner[0]
5817    yllcorner = fid.yllcorner[0]
5818
    # Points stored in the sts file are normalised to [xllcorner, yllcorner]
    # but we cannot assume that the boundary polygon will be; at least not
    # the additional points specified by the user after this function is called
5822    x = fid.variables['x'][:] + xllcorner
5823    y = fid.variables['y'][:] + yllcorner
5824
5825    x = num.reshape(x, (len(x),1))
5826    y = num.reshape(y, (len(y),1))
5827    sts_points = num.concatenate((x,y), axis=1)
5828
5829    return sts_points.tolist()
5830
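# Example (a sketch with a hypothetical stem): read back the gauge locations
# written by urs2sts() above as an absolute UTM boundary polyline:
#
#     boundary_points = create_sts_boundary('urs_gauges')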
5831
5832##
5833# @brief A class to write an SWW file.
5834class Write_sww:
5835    from anuga.shallow_water.shallow_water_domain import Domain
5836
5837    # FIXME (Ole): Hardwiring the conserved quantities like
5838    # this could be a problem. I would prefer taking them from
5839    # the instantiation of Domain.
5840    #
5841    # (DSG) There is not always a Domain instance when Write_sww is used.
5842    # Check to see if this is the same level of hardwiring as is in
    # shallow water domain.
5844
5845    sww_quantities = Domain.conserved_quantities
5846
5847    RANGE = '_range'
5848    EXTREMA = ':extrema'
5849
5850    ##
    # @brief Instantiate the SWW writer class.
5852    def __init__(self):
5853        pass
5854
5855    ##
5856    # @brief Store a header in the SWW file.
5857    # @param outfile Open handle to the file that will be written.
5858    # @param times A list of time slices *or* a start time.
5859    # @param number_of_volumes The number of triangles.
5860    # @param number_of_points The number of points.
5861    # @param description The internal file description string.
5862    # @param smoothing True if smoothing is to be used.
5863    # @param order
    # @param sww_precision Data type of the quantity to be written (Float32)
5865    # @param verbose True if this function is to be verbose.
5866    # @note If 'times' is a list, the info will be made relative.
5867    def store_header(self,
5868                     outfile,
5869                     times,
5870                     number_of_volumes,
5871                     number_of_points,
5872                     description='Converted from XXX',
5873                     smoothing=True,
5874                     order=1,
5875                     sww_precision=num.Float32,
5876                     verbose=False):
5877        """Write an SWW file header.
5878
5879        outfile - the open file that will be written
5880        times - A list of the time slice times OR a start time
5881        Note, if a list is given the info will be made relative.
5882        number_of_volumes - the number of triangles
5883        """
5884
5885        outfile.institution = 'Geoscience Australia'
5886        outfile.description = description
5887
5888        # For sww compatibility
5889        if smoothing is True:
            # Smoothing to be deprecated
5891            outfile.smoothing = 'Yes'
5892            outfile.vertices_are_stored_uniquely = 'False'
5893        else:
            # Smoothing to be deprecated
5895            outfile.smoothing = 'No'
5896            outfile.vertices_are_stored_uniquely = 'True'
5897        outfile.order = order
5898
5899        try:
5900            revision_number = get_revision_number()
5901        except:
5902            revision_number = None
5903        # Allow None to be stored as a string
5904        outfile.revision_number = str(revision_number)
5905
        # This is being used to separate one number from a list.
        # What it is actually doing is telling lists/arrays apart from scalars.
5908        if type(times) is list or type(times) is num.ArrayType:
5909            number_of_times = len(times)
5910            times = ensure_numeric(times)
5911            if number_of_times == 0:
5912                starttime = 0
5913            else:
5914                starttime = times[0]
5915                times = times - starttime  #Store relative times
5916        else:
5917            number_of_times = 0
5918            starttime = times
5919            #times = ensure_numeric([])
5920
5921        outfile.starttime = starttime
5922
5923        # dimension definitions
5924        outfile.createDimension('number_of_volumes', number_of_volumes)
5925        outfile.createDimension('number_of_vertices', 3)
5926        outfile.createDimension('numbers_in_range', 2)
5927
5928        if smoothing is True:
5929            outfile.createDimension('number_of_points', number_of_points)
5930            # FIXME(Ole): This will cause sww files for parallel domains to
5931            # have ghost nodes stored (but not used by triangles).
5932            # To clean this up, we have to change get_vertex_values and
5933            # friends in quantity.py (but I can't be bothered right now)
5934        else:
5935            outfile.createDimension('number_of_points', 3*number_of_volumes)
5936
5937        outfile.createDimension('number_of_timesteps', number_of_times)
5938
5939        # variable definitions
5940        outfile.createVariable('x', sww_precision, ('number_of_points',))
5941        outfile.createVariable('y', sww_precision, ('number_of_points',))
5942        outfile.createVariable('elevation', sww_precision,
5943                               ('number_of_points',))
5944        q = 'elevation'
5945        outfile.createVariable(q + Write_sww.RANGE, sww_precision,
5946                               ('numbers_in_range',))
5947
5948
5949        # Initialise ranges with small and large sentinels.
5950        # If this was in pure Python we could have used None sensibly
5951        outfile.variables[q+Write_sww.RANGE][0] = max_float  # Min
5952        outfile.variables[q+Write_sww.RANGE][1] = -max_float # Max
5953
5954        # FIXME: Backwards compatibility
5955        outfile.createVariable('z', sww_precision, ('number_of_points',))
5956
5957        outfile.createVariable('volumes', num.Int, ('number_of_volumes',
5958                                                    'number_of_vertices'))
5959
5960        # Doing sww_precision instead of Float gives cast errors.
5961        outfile.createVariable('time', num.Float,
5962                               ('number_of_timesteps',))
5963
5964        for q in Write_sww.sww_quantities:
5965            outfile.createVariable(q, sww_precision, ('number_of_timesteps',
5966                                                      'number_of_points'))
5967            outfile.createVariable(q + Write_sww.RANGE, sww_precision,
5968                                   ('numbers_in_range',))
5969
5970            # Initialise ranges with small and large sentinels.
5971            # If this was in pure Python we could have used None sensibly
5972            outfile.variables[q+Write_sww.RANGE][0] = max_float  # Min
5973            outfile.variables[q+Write_sww.RANGE][1] = -max_float # Max
5974
5975        if type(times) is list or type(times) is num.ArrayType:
5976            outfile.variables['time'][:] = times    #Store time relative
5977
5978        if verbose:
5979            print '------------------------------------------------'
5980            print 'Statistics:'
5981            print '    t in [%f, %f], len(t) == %d' \
5982                  % (min(times.flat), max(times.flat), len(times.flat))
5983
5984    ##
5985    # @brief Store triangulation data in the underlying file.
5986    # @param outfile Open handle to underlying file.
5987    # @param points_utm List or array of points in UTM.
5988    # @param volumes
5989    # @param elevation
5990    # @param zone
5991    # @param new_origin georeference that the points can be set to.
5992    # @param points_georeference The georeference of the points_utm.
5993    # @param verbose True if this function is to be verbose.
5994    def store_triangulation(self,
5995                            outfile,
5996                            points_utm,
5997                            volumes,
5998                            elevation, zone=None, new_origin=None,
5999                            points_georeference=None, verbose=False):
6000        """
        new_origin - a georeference that the points can be set to. (Maybe
6002        do this before calling this function.)
6003
6004        points_utm - currently a list or array of the points in UTM.
6005        points_georeference - the georeference of the points_utm
6006
6007        How about passing new_origin and current_origin.
        If you get both, do a conversion from the old to the new.
6009
6010        If you only get new_origin, the points are absolute,
6011        convert to relative
6012
6013        if you only get the current_origin the points are relative, store
6014        as relative.
6015
6016        if you get no georefs create a new georef based on the minimums of
6017        points_utm.  (Another option would be to default to absolute)
6018
6019        Yes, and this is done in another part of the code.
6020        Probably geospatial.
6021
6022        If you don't supply either geo_refs, then supply a zone. If not
6023        the default zone will be used.
6024
6025        precon:
6026            header has been called.
6027        """
6028
6029        number_of_points = len(points_utm)
6030        volumes = num.array(volumes)
6031        points_utm = num.array(points_utm)
6032
6033        # given the two geo_refs and the points, do the stuff
6034        # described in the method header
6035        # if this is needed else where, pull out as a function
6036        points_georeference = ensure_geo_reference(points_georeference)
6037        new_origin = ensure_geo_reference(new_origin)
6038        if new_origin is None and points_georeference is not None:
6039            points = points_utm
6040            geo_ref = points_georeference
6041        else:
6042            if new_origin is None:
6043                new_origin = Geo_reference(zone, min(points_utm[:,0]),
6044                                                 min(points_utm[:,1]))
6045            points = new_origin.change_points_geo_ref(points_utm,
6046                                                      points_georeference)
6047            geo_ref = new_origin
6048
6049        # At this stage I need a georef and points
6050        # the points are relative to the georef
6051        geo_ref.write_NetCDF(outfile)
6052
6053        # This will put the geo ref in the middle
6054        #geo_ref = Geo_reference(refzone,(max(x)+min(x))/2.0,(max(x)+min(y))/2.)
6055
6056        x =  points[:,0]
6057        y =  points[:,1]
6058        z = outfile.variables['z'][:]
6059
6060        if verbose:
6061            print '------------------------------------------------'
6062            print 'More Statistics:'
            print '  Extent (x/y):'
            print '    x in [%f, %f], len(x) == %d' \
                  % (min(x), max(x), len(x))
            print '    y in [%f, %f], len(y) == %d' \
                  % (min(y), max(y), len(y))
6068            print '    z in [%f, %f], len(z) == %d' \
6069                  % (min(elevation), max(elevation), len(elevation))
6070            print 'geo_ref: ',geo_ref
6071            print '------------------------------------------------'
6072
6073        #z = resize(bath_grid, outfile.variables['z'][:].shape)
6074        outfile.variables['x'][:] = points[:,0] #- geo_ref.get_xllcorner()
6075        outfile.variables['y'][:] = points[:,1] #- geo_ref.get_yllcorner()
6076        outfile.variables['z'][:] = elevation
6077        outfile.variables['elevation'][:] = elevation  #FIXME HACK
6078        outfile.variables['volumes'][:] = volumes.astype(num.Int32) #On Opteron 64
6079
6080        q = 'elevation'
6081        # This updates the _range values
6082        outfile.variables[q + Write_sww.RANGE][0] = min(elevation)
6083        outfile.variables[q + Write_sww.RANGE][1] = max(elevation)


    ##
    # @brief Write the quantity data to the underlying file.
    # @param outfile Handle to open underlying file.
    # @param sww_precision Format of quantity data to write (default Float32).
    # @param slice_index Index into an already-built time dimension.
    # @param time Time value used to grow the time dimension by one slice.
    # @param verbose True if this function is to be verbose.
    # @param **quant Keyword arguments holding the quantity arrays.
    def store_quantities(self, outfile, sww_precision=num.Float32,
                         slice_index=None, time=None,
                         verbose=False, **quant):
        """
        Write the quantity info.

        **quant is extra keyword arguments passed in. These must be
        the sww quantities, currently: stage, xmomentum and ymomentum.

        If the time array has already been built, use slice_index
        to specify where to write.

        Otherwise, pass time to grow the time dimension by one slice.

        Maybe make this general, but the viewer assumes these quantities,
        so maybe we don't want it general - unless the viewer is general.

        precon:
            store_triangulation and store_header have been called.
        """
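
        # Illustrative usage (added, not in the original source; `fid`
        # and the quantity arrays are hypothetical):
        #
        #     writer.store_quantities(fid, time=domain.time,
        #                             stage=stage_values,
        #                             xmomentum=xmom_values,
        #                             ymomentum=ymom_values)
        #
        # or, if the time array is prebuilt, write at a known index:
        #
        #     writer.store_quantities(fid, slice_index=i,
        #                             stage=stage_values,
        #                             xmomentum=xmom_values,
        #                             ymomentum=ymom_values)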

        if time is not None:
            file_time = outfile.variables['time']
            slice_index = len(file_time)
            file_time[slice_index] = time

        # Write the conserved quantities from Domain.
        # Typically stage, xmomentum, ymomentum.
        # Other quantities will be ignored, silently.
        # Also write the ranges: stage_range,
        # xmomentum_range and ymomentum_range.
        for q in Write_sww.sww_quantities:
            if not quant.has_key(q):
                msg = 'SWW file can not write quantity %s' % q
                raise NewQuantity, msg
            else:
                q_values = quant[q]
                outfile.variables[q][slice_index] = \
                                q_values.astype(sww_precision)

                # This updates the _range values
                q_range = outfile.variables[q + Write_sww.RANGE][:]
                q_values_min = min(q_values)
                if q_values_min < q_range[0]:
                    outfile.variables[q + Write_sww.RANGE][0] = q_values_min
                q_values_max = max(q_values)
                if q_values_max > q_range[1]:
                    outfile.variables[q + Write_sww.RANGE][1] = q_values_max

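    # Note (added, not in the original source): after repeated calls the
    # *_range variables hold the running global extrema over all stored
    # time slices, so a reader could recover them with, e.g.:
    #
    #     smin, smax = fid.variables['stage' + Write_sww.RANGE][:]
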
    ##
    # @brief Print the quantity ranges stored in the underlying file.
    # @param outfile Handle to the open underlying file.
    def verbose_quantities(self, outfile):
        print '------------------------------------------------'
        print 'More Statistics:'
        for q in Write_sww.sww_quantities:
            print '    %s in [%f, %f]' % (q,
                                          outfile.variables[q+Write_sww.RANGE][0],
                                          outfile.variables[q+Write_sww.RANGE][1])
        print '------------------------------------------------'


##
# @brief Obsolete?
# @param outfile Handle to the open output file.
# @param has List of wave height arrays, one per time step.
# @param uas List of east velocity arrays, one per time step.
# @param vas List of north velocity arrays, one per time step.
# @param elevation Bed elevation.
# @param mean_stage Mean stage added to the scaled wave height.
# @param zscale Scale factor applied to the wave height.
# @param verbose True if this function is to be verbose.
def obsolete_write_sww_time_slices(outfile, has, uas, vas, elevation,
                                   mean_stage=0, zscale=1,
                                   verbose=False):
    # Time stepping
    stage = outfile.variables['stage']
    xmomentum = outfile.variables['xmomentum']
    ymomentum = outfile.variables['ymomentum']

    n = len(has)
    j = 0
    for ha, ua, va in map(None, has, uas, vas):
        if verbose and j % ((n+10)/10) == 0: print '  Doing %d of %d' % (j, n)
        w = zscale*ha + mean_stage
        stage[j] = w
        h = w - elevation
        xmomentum[j] = ua * h
        ymomentum[j] = -1 * va * h  # -1 since in mux files south is positive.
        j += 1
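
# Worked example of the arithmetic above (added; the values are
# hypothetical): with zscale=1 and mean_stage=0, a wave height ha=0.5 m
# over a bed at elevation=-10 m gives stage w = 0.5 m, water depth
# h = w - elevation = 10.5 m, and xmomentum = ua * h, i.e. velocity
# times depth.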


##
# @brief Convert a set of URS files to a text file.
# @param basename_in Stem path to the 3 URS files.
# @param location_index Index of one location whose time series is also written.
def urs2txt(basename_in, location_index=None):
    """
    Not finished or tested
    """
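
    # Illustrative usage (added; note the function is flagged above as
    # not finished or tested).  Assuming the three mux files,
    # basename_in plus each of WAVEHEIGHT_MUX_LABEL, EAST_VELOCITY_LABEL
    # and NORTH_VELOCITY_LABEL, exist:
    #
    #     urs2txt('project', location_index=0)
    #
    # would write 'project.txt' with gauge positions and depths, plus the
    # HA/UA/VA time series at gauge 0.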

    files_in = [basename_in + WAVEHEIGHT_MUX_LABEL,
                basename_in + EAST_VELOCITY_LABEL,
                basename_in + NORTH_VELOCITY_LABEL]
    quantities = ['HA','UA','VA']

    d = ","

    # Instantiate a Urs_points object for each of the three mux files.
    mux = {}
    for quantity, file in map(None, quantities, files_in):
        mux[quantity] = Urs_points(file)

    # Could check that the depth is the same. (hashing)

    # Handle to one mux file to get the depth data.
    a_mux = mux[quantities[0]]

    # Convert to UTM
    latitudes = a_mux.lonlatdep[:,1]
    longitudes = a_mux.lonlatdep[:,0]
    points_utm, zone = \
        convert_from_latlon_to_utm(latitudes=latitudes, longitudes=longitudes)
    depths = a_mux.lonlatdep[:,2]
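
    # Example of the conversion above (added; the values are
    # hypothetical): a gauge at latitude -33.5, longitude 151.2 lies in
    # UTM zone 56, so convert_from_latlon_to_utm returns its
    # easting/northing and zone 56.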

    # Open the output text file and start writing.
    fid = open(basename_in + '.txt', 'w')

    fid.write("zone: " + str(zone) + "\n")

    if location_index is not None:
        # Title
        li = location_index
        fid.write('location_index' + d + 'lat' + d + 'long' + d +
                  'Easting' + d + 'Northing' + '\n')
        fid.write(str(li) + d + str(latitudes[li]) + d +
                  str(longitudes[li]) + d + str(points_utm[li][0]) + d +
                  str(points_utm[li][1]) + '\n')

    # The non-time-dependent data
    # Title
    fid.write('location_index' + d + 'lat' + d + 'long' + d +
              'Easting' + d + 'Northing' + d + 'depth m' + '\n')
    i = 0
    for depth, point_utm, lat, long in map(None, depths, points_utm,
                                           latitudes, longitudes):

        fid.write(str(i) + d + str(lat) + d + str(long) + d +
                  str(point_utm[0]) + d + str(point_utm[1]) + d +
                  str(depth) + '\n')
        i += 1

    # Time-dependent data
    if location_index is not None:
        time_step = a_mux.time_step
        i = 0
        # Title
        fid.write('time' + d + 'HA depth m' + d + 'UA momentum East x m/sec' +
                  d + 'VA momentum North y m/sec' + '\n')
        for HA, UA, VA in map(None, mux['HA'], mux['UA'], mux['VA']):
            fid.write(str(i*time_step) + d + str(HA[location_index]) + d +
                      str(UA[location_index]) + d +
                      str(VA[location_index]) + '\n')
            i += 1


##
# @brief A class to write STS files.
class Write_sts:
    sts_quantities = ['stage', 'xmomentum', 'ymomentum']
    RANGE = '_range'
    EXTREMA = ':extrema'

    ##
    # @brief Instantiate this STS writer class.
    def __init__(self):
        pass

    ##
    # @brief Write a header to the underlying data file.
    # @param outfile Handle to open file to write.
    # @param times A list of the time slice times *or* a start time.
    # @param number_of_points The number of URS gauge sites.
    # @param description Description string to write into the STS file.
    # @param sts_precision Format of data to write (default Float32).
    # @param verbose True if this function is to be verbose.
    # @note If 'times' is a list, the info will be made relative.
    def store_header(self,
                     outfile,
                     times,
                     number_of_points,
                     description='Converted from URS mux2 format',
                     sts_precision=num.Float32,
                     verbose=False):
        """
        outfile - handle to the open NetCDF file that will be written to
        times - a list of the time slice times OR a start time
        Note: if a list is given, the times will be stored relative to the first.
        number_of_points - the number of URS gauge sites
        """
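
        # Illustrative usage (added; `fid` and the argument values are
        # hypothetical):
        #
        #     sts_writer = Write_sts()
        #     sts_writer.store_header(fid, times=[3600.0, 3660.0, 3720.0],
        #                             number_of_points=42)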

        outfile.institution = 'Geoscience Australia'
        outfile.description = description

        try:
            revision_number = get_revision_number()
        except:
            revision_number = None

        # Allow None to be stored as a string
        outfile.revision_number = str(revision_number)

        # Start time in seconds since the epoch (midnight 1/1/1970).
        # This test separates a single start time from a sequence of
        # time slice times, i.e. it distinguishes lists and numeric
        # arrays from a scalar.
        if type(times) is list or type(times) is num.ArrayType:
            number_of_times = len(times)
            times = ensure_numeric(times)
            if number_of_times == 0:
                starttime = 0
            else:
                starttime = times[0]
                times = times - starttime  # Store relative times
        else:
            number_of_times = 0
            starttime = times

        outfile.starttime = starttime
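
        # Worked example of the relative-time logic above (added; the
        # values are hypothetical): times=[3600, 3660, 3720] gives
        # starttime=3600 and stored times [0, 60, 120]; a scalar
        # times=3600 gives starttime=3600 and an empty time dimension.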

        # Dimension definitions
        outfile.createDimension('number_of_points', number_of_points)
        outfile.createDimension('number_of_timesteps', number_of_times)
        outfile.createDimension('numbers_in_range', 2)

        # Variable definitions
        outfile.createVariable('permutation', num.Int, ('number_of_points',))
        outfile.createVariable('x', sts_precision, ('number_of_points',))
        outfile.createVariable('y', sts_precision, ('number_of_points',))
        outfile.createVariable('elevation', sts_precision,
                               ('number_of_points',))

        q = 'elevation'
        outfile.createVariable(q + Write_sts.RANGE, sts_precision,
                               ('numbers_in_range',))

        # Initialise ranges with small and large sentinels.
        # If this was in pure Python we could have used None sensibly.
        outfile.variables[q + Write_sts.RANGE][0] = max_float  # Min
        outfile.variables[q + Write_sts.RANGE][1] = -max_float # Max

        # Doing sts_precision instead of Float gives cast errors.
        outfile.createVariable('time', num.Float, ('number_of_timesteps',))

        for q in Write_sts.sts_quantities:
            outfile.createVariable(q, sts_precision, ('number_of_timesteps',
                                                      'number_of_points'))
            outfile.createVariable(q + Write_sts.RANGE, sts_precision,
                                   ('numbers_in_range',))
            # Initialise ranges with small and large sentinels.
            # If this was in pure Python we could have used None sensibly.
            outfile.variables[q + Write_sts.RANGE][0] = max_float  # Min
            outfile.variables[q + Write_sts.RANGE][1] = -max_float # Max

        if type(times) is list or type(times) is num.ArrayType:
            outfile.variables['time'][:] = times    # Store relative times

        # Guard: times.flat would fail for a scalar start time, and
        # min/max would fail for an empty list.
        if verbose and number_of_times > 0:
            print '------------------------------------------------'
            print 'Statistics:'
            print '    t in [%f, %f], len(t) == %d' \
                  % (min(times.flat), max(times.flat), len(times.flat))

    ##
    # @brief Write the point location data to the underlying file.
    # @param outfile Handle to open file to write.
    # @param points_utm List or array of the points in UTM.
    # @param elevation Elevation at each point.
    # @param zone UTM zone used if no georeference is supplied.
    # @param new_origin Georeference that the points can be set to.
    # @param points_georeference The georeference of points_utm.
