1"""datamanager.py - input output for AnuGA
2
3
4This module takes care of reading and writing datafiles such as topograhies,
5model output, etc
6
7
8Formats used within AnuGA:
9
10.sww: Netcdf format for storing model output f(t,x,y)
11.tms: Netcdf format for storing time series f(t)
12
13.csv: ASCII format for storing arbitrary points and associated attributes
14.pts: NetCDF format for storing arbitrary points and associated attributes
15
16.asc: ASCII format of regular DEMs as output from ArcView
17.prj: Associated ArcView file giving more meta data for asc format
18.ers: ERMapper header format of regular DEMs for ArcView
19
20.dem: NetCDF representation of regular DEM data
21
22.tsh: ASCII format for storing meshes and associated boundary and region info
23.msh: NetCDF format for storing meshes and associated boundary and region info
24
25.nc: Native ferret NetCDF format
26.geo: Houdinis ascii geometry format (?)
27
28
29A typical dataflow can be described as follows
30
31Manually created files:
32ASC, PRJ:     Digital elevation models (gridded)
33TSH:          Triangular meshes (e.g. created from anuga.pmesh)
34NC            Model outputs for use as boundary conditions (e.g from MOST)
35
36
37AUTOMATICALLY CREATED FILES:
38
39ASC, PRJ  ->  DEM  ->  PTS: Conversion of DEM's to native pts file
40
41NC -> SWW: Conversion of MOST bundary files to boundary sww
42
43PTS + TSH -> TSH with elevation: Least squares fit
44
45TSH -> SWW:  Conversion of TSH to sww viewable using Swollen
46
47TSH + Boundary SWW -> SWW: Simluation using abstract_2d_finite_volumes
48
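Example of one step in this dataflow (filename stem is hypothetical):

    dem2pts('topography')    # topography.dem -> topography.pts
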
49"""
50
51# This file was reverted from changeset:5484 to changeset:5470 on 10th July
52# by Ole.
53
54import os, sys
55import csv
56import exceptions
57import string
58import shutil
59from struct import unpack
60import array as p_array
61from os import sep, path, remove, mkdir, access, F_OK, W_OK, getcwd
62
63import numpy as num
64
65from Scientific.IO.NetCDF import NetCDFFile
66from os.path import exists, basename, join
67from os import getcwd
68
69from anuga.coordinate_transforms.redfearn import redfearn, \
70     convert_from_latlon_to_utm
71from anuga.coordinate_transforms.geo_reference import Geo_reference, \
72     write_NetCDF_georeference, ensure_geo_reference
73from anuga.geospatial_data.geospatial_data import Geospatial_data,\
74     ensure_absolute
75from anuga.config import minimum_storable_height as \
76     default_minimum_storable_height
77from anuga.config import netcdf_mode_r, netcdf_mode_w, netcdf_mode_a
78from anuga.config import netcdf_float, netcdf_float32, netcdf_int
79from anuga.config import max_float
80from anuga.utilities.numerical_tools import ensure_numeric,  mean
81from anuga.caching.caching import myhash
82from anuga.utilities.anuga_exceptions import ANUGAError
83from anuga.shallow_water import Domain
84from anuga.abstract_2d_finite_volumes.pmesh2domain import \
85     pmesh_to_domain_instance
86from anuga.abstract_2d_finite_volumes.util import get_revision_number, \
87     remove_lone_verts, sww2timeseries, get_centroid_values
88
89from anuga.abstract_2d_finite_volumes.neighbour_mesh import segment_midpoints
90from anuga.load_mesh.loadASCII import export_mesh_file
91from anuga.utilities.polygon import intersection
92
93from anuga.utilities.system_tools import get_vars_in_expression
94
95
96# Default block size for sww2dem()
97DEFAULT_BLOCK_SIZE = 10000
98
99######
100# Exception classes
101######
102
103class TitleValueError(exceptions.Exception): pass
104class DataMissingValuesError(exceptions.Exception): pass
105class DataFileNotOpenError(exceptions.Exception): pass
106class DataTimeError(exceptions.Exception): pass
107class DataDomainError(exceptions.Exception): pass
108class NewQuantity(exceptions.Exception): pass
109
110
111######
112# formula mappings
113######
114
115quantity_formula = {'momentum':'(xmomentum**2 + ymomentum**2)**0.5',
116                    'depth':'stage-elevation',
117                    'speed': \
118 '(xmomentum**2 + ymomentum**2)**0.5/(stage-elevation+1.e-6/(stage-elevation))'}
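

# A minimal sketch (not part of the original API) showing how a
# quantity_formula entry can be evaluated, assuming 'quantities' maps
# names such as 'stage' and 'elevation' to numeric (e.g. numpy) arrays:
def _example_apply_formula(formula, quantities):
    """Illustration only: evaluate e.g. quantity_formula['depth']
    over a dictionary of arrays."""
    return eval(formula, {}, quantities)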


##
# @brief Convert a possible filename into a standard form.
# @param s Filename to process.
# @return The new filename string.
def make_filename(s):
    """Transform argument string into a suitable filename
127    """
128
129    s = s.strip()
130    s = s.replace(' ', '_')
131    s = s.replace('(', '')
132    s = s.replace(')', '')
133    s = s.replace('__', '_')
134
135    return s
136
137
138##
139# @brief Check that a specified filesystem directory path exists.
140# @param path The dirstory path to check.
141# @param verbose True if this function is to be verbose.
142# @note If directory path doesn't exist, it will be created.
143def check_dir(path, verbose=None):
144    """Check that specified path exists.
145    If path does not exist it will be created if possible
146
147    USAGE:
148       checkdir(path, verbose):
149
150    ARGUMENTS:
151        path -- Directory
152        verbose -- Flag verbose output (default: None)
153
154    RETURN VALUE:
155        Verified path including trailing separator
156    """
157
158    import os.path
159
160    if sys.platform in ['nt', 'dos', 'win32', 'what else?']:
161        unix = 0
162    else:
163        unix = 1
164
165    # add terminal separator, if it's not already there
166    if path[-1] != os.sep:
167        path = path + os.sep
168
169    # expand ~ or ~username in path
170    path = os.path.expanduser(path)
171
172    # create directory if required
173    if not (os.access(path, os.R_OK and os.W_OK) or path == ''):
174        try:
175            exitcode = os.mkdir(path)
176
177            # Change access rights if possible
178            if unix:
179                exitcode = os.system('chmod 775 ' + path)
180            else:
181                pass  # FIXME: What about access rights under Windows?
182
183            if verbose: print 'MESSAGE: Directory', path, 'created.'
184        except:
185            print 'WARNING: Directory', path, 'could not be created.'
186            if unix:
187                path = '/tmp/'
188            else:
189                path = 'C:' + os.sep
190
191            print "Using directory '%s' instead" % path
192
193    return path
194
195
196##
197# @brief Delete directory and all sub-directories.
198# @param path Path to the directory to delete.
199def del_dir(path):
200    """Recursively delete directory path and all its contents
201    """
202
203    if os.path.isdir(path):
204        for file in os.listdir(path):
205            X = os.path.join(path, file)
206
207            if os.path.isdir(X) and not os.path.islink(X):
208                del_dir(X)
209            else:
210                try:
211                    os.remove(X)
212                except:
213                    print "Could not remove file %s" % X
214
215        os.rmdir(path)
216
217
218##
219# @brief ??
220# @param path
221# @param __func__
222# @param verbose True if this function is to be verbose.
223# @note ANOTHER OPTION, IF NEED IN THE FUTURE, Nick B 7/2007
224def rmgeneric(path, func, verbose=False):
225    ERROR_STR= """Error removing %(path)s, %(error)s """
226
227    try:
228        func(path)
229        if verbose: print 'Removed ', path
230    except OSError, (errno, strerror):
231        print ERROR_STR % {'path' : path, 'error': strerror }
232
233
234##
235# @brief Remove directory and all sub-directories.
236# @param path Filesystem path to directory to remove.
237# @param verbose True if this function is to be verbose.
238def removeall(path, verbose=False):
239    if not os.path.isdir(path):
240        return
241
242    for x in os.listdir(path):
243        fullpath = os.path.join(path, x)
244        if os.path.isfile(fullpath):
245            f = os.remove
246            rmgeneric(fullpath, f)
247        elif os.path.isdir(fullpath):
248            removeall(fullpath)
249            f = os.rmdir
250            rmgeneric(fullpath, f, verbose)
251
252
253##
254# @brief Create a standard filename.
255# @param datadir Directory where file is to be created.
256# @param filename Filename 'stem'.
257# @param format Format of the file, becomes filename extension.
258# @param size Size of file, becomes part of filename.
259# @param time Time (float), becomes part of filename.
260# @return The complete filename path, including directory.
261# @note The containing directory is created, if necessary.
262def create_filename(datadir, filename, format, size=None, time=None):
263    FN = check_dir(datadir) + filename
264
265    if size is not None:
266        FN += '_size%d' % size
267
268    if time is not None:
269        FN += '_time%.2f' % time
270
271    FN += '.' + format
272
273    return FN
274
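# Example (illustrative):
#     create_filename('output', 'run', 'sww', time=1.5)
# returns 'output/run_time1.50.sww' (with the platform's path separator).
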

##
# @brief Get all files with a standard name and a given set of attributes.
# @param datadir Directory files must be in.
# @param filename Filename stem.
# @param format Filename extension.
# @param size Filename size.
# @return A list of filenames (including directory) that match the attributes.
def get_files(datadir, filename, format, size):
    """Get all file (names) with given name, size and format
    """

    import glob

    dir = check_dir(datadir)
    pattern = dir + filename + '_size%d*.%s' % (size, format)

    return glob.glob(pattern)


##
# @brief Generic class for storing output to e.g. visualisation or checkpointing
class Data_format:
    """Generic interface to data formats
    """

    ##
    # @brief Instantiate this instance.
    # @param domain The ANUGA domain to store data from.
    # @param extension Filename extension (e.g. 'sww').
    # @param mode The mode of the underlying file.
    def __init__(self, domain, extension, mode=netcdf_mode_w):
        assert mode[0] in ['r', 'w', 'a'], \
               "Mode %s must be either:\n" % mode + \
               "   'w' (write)\n" + \
               "   'r' (read)\n" + \
               "   'a' (append)"

        # Create filename
        self.filename = create_filename(domain.get_datadir(),
                                        domain.get_name(), extension)

        self.timestep = 0
        self.domain = domain

        # Exclude ghosts in case this is a parallel domain
        self.number_of_nodes = domain.number_of_full_nodes
        self.number_of_volumes = domain.number_of_full_triangles
        #self.number_of_volumes = len(domain)

        #FIXME: Should we have a general set_precision function?


##
# @brief Class for storing output to e.g. visualisation
class Data_format_sww(Data_format):
    """Interface to native NetCDF format (.sww) for storing model output

    There are two kinds of data

    1: Constant data: Vertex coordinates and field values. Stored once
    2: Variable data: Conserved quantities. Stored once per timestep.

    All data is assumed to reside at vertex locations.
    """

    ##
    # @brief Instantiate this instance.
    # @param domain The ANUGA domain to store output from.
    # @param mode Mode of the underlying data file.
    # @param max_size Maximum file size in bytes before splitting.
    # @param recursion Used internally when files are split; leave as False.
    # @note Prepare the underlying data file if mode starts with 'w'.
    def __init__(self, domain, mode=netcdf_mode_w, max_size=2000000000, recursion=False):
        from Scientific.IO.NetCDF import NetCDFFile

        self.precision = netcdf_float32 # Use single precision for quantities
        self.recursion = recursion
        self.mode = mode
        if hasattr(domain, 'max_size'):
            self.max_size = domain.max_size # file size max is 2GB
        else:
            self.max_size = max_size
        if hasattr(domain, 'minimum_storable_height'):
            self.minimum_storable_height = domain.minimum_storable_height
        else:
            self.minimum_storable_height = default_minimum_storable_height

        # call owning constructor
        Data_format.__init__(self, domain, 'sww', mode)

        # NetCDF file definition
        fid = NetCDFFile(self.filename, mode)
        if mode[0] == 'w':
            description = 'Output from anuga.abstract_2d_finite_volumes ' \
                          'suitable for plotting'
            self.writer = Write_sww()
            self.writer.store_header(fid,
                                     domain.starttime,
                                     self.number_of_volumes,
                                     self.domain.number_of_full_nodes,
                                     description=description,
                                     smoothing=domain.smooth,
                                     order=domain.default_order,
                                     sww_precision=self.precision)

            # Extra optional information
            if hasattr(domain, 'texture'):
                fid.texture = domain.texture

            if domain.quantities_to_be_monitored is not None:
                fid.createDimension('singleton', 1)
                fid.createDimension('two', 2)

                poly = domain.monitor_polygon
                if poly is not None:
                    N = len(poly)
                    fid.createDimension('polygon_length', N)
                    fid.createVariable('extrema.polygon',
                                       self.precision,
                                       ('polygon_length', 'two'))
                    fid.variables['extrema.polygon'][:] = poly

                interval = domain.monitor_time_interval
                if interval is not None:
                    fid.createVariable('extrema.time_interval',
                                       self.precision,
                                       ('two',))
                    fid.variables['extrema.time_interval'][:] = interval

                for q in domain.quantities_to_be_monitored:
                    fid.createVariable(q + '.extrema', self.precision,
                                       ('numbers_in_range',))
                    fid.createVariable(q + '.min_location', self.precision,
                                       ('numbers_in_range',))
                    fid.createVariable(q + '.max_location', self.precision,
                                       ('numbers_in_range',))
                    fid.createVariable(q + '.min_time', self.precision,
                                       ('singleton',))
                    fid.createVariable(q + '.max_time', self.precision,
                                       ('singleton',))

        fid.close()

    ##
    # @brief Store connectivity data into the underlying data file.
    def store_connectivity(self):
        """Specialisation of store_connectivity for NetCDF format

        Writes x,y,z coordinates of triangles constituting
        the bed elevation.
        """

        from Scientific.IO.NetCDF import NetCDFFile

        domain = self.domain

        # append to the NetCDF file
        fid = NetCDFFile(self.filename, netcdf_mode_a)

#        # Get the variables
#        x = fid.variables['x']
#        y = fid.variables['y']
#        z = fid.variables['elevation']
#        volumes = fid.variables['volumes']

        # Get X, Y and bed elevation Z
        Q = domain.quantities['elevation']
        X,Y,Z,V = Q.get_vertex_values(xy=True, precision=self.precision)

        # store the connectivity data
        points = num.concatenate( (X[:,num.newaxis],Y[:,num.newaxis]), axis=1 )
        self.writer.store_triangulation(fid,
                                        points,
                                        V.astype(num.float32),
                                        Z,
                                        points_georeference=\
                                            domain.geo_reference)

        fid.close()

    ##
    # @brief Store time and named quantities to the underlying data file.
    # @param names The names of the quantities to store.
    # @note If 'names' not supplied, store a standard set of quantities.
    def store_timestep(self, names=None):
        """Store time and named quantities to file
        """

        from Scientific.IO.NetCDF import NetCDFFile
        import types
        from time import sleep
        from os import stat

        if names is None:
            # Standard shallow water wave equation quantities in ANUGA
            names = ['stage', 'xmomentum', 'ymomentum']

        # Get NetCDF
        retries = 0
        file_open = False
        while not file_open and retries < 10:
            try:
                fid = NetCDFFile(self.filename, netcdf_mode_a) # Open existing file
            except IOError:
                # This could happen if someone was reading the file.
                # In that case, wait a while and try again
                msg = 'Warning (store_timestep): File %s could not be opened' \
                      % self.filename
                msg += ' - trying step %s again' % self.domain.time
                print msg
                retries += 1
                sleep(1)
            else:
                file_open = True

        if not file_open:
            msg = 'File %s could not be opened for append' % self.filename
            raise DataFileNotOpenError, msg

        # Check to see if the file is already too big:
        time = fid.variables['time']
        i = len(time) + 1
        file_size = stat(self.filename)[6]
        file_size_increase = file_size / i
        if file_size + file_size_increase > self.max_size * 2**self.recursion:
            # In order to get the file name and start time correct,
            # I change the domain.filename and domain.starttime.
            # This is the only way to do this without changing
            # other modules (I think).

            # Write a filename addon that won't break Swollen's reader
            # (10.sww is bad)
            filename_ext = '_time_%s' % self.domain.time
            filename_ext = filename_ext.replace('.', '_')

            # Remember the old filename, then give domain a
            # name with the extension
            old_domain_filename = self.domain.get_name()
            if not self.recursion:
                self.domain.set_name(old_domain_filename + filename_ext)

            # Change the domain starttime to the current time
            old_domain_starttime = self.domain.starttime
            self.domain.starttime = self.domain.time

            # Build a new data_structure.
            next_data_structure = Data_format_sww(self.domain, mode=self.mode,
                                                  max_size=self.max_size,
                                                  recursion=self.recursion+1)
            if not self.recursion:
                print '    file_size = %s' % file_size
                print '    saving file to %s' % next_data_structure.filename

            # set up the new data_structure
            self.domain.writer = next_data_structure

            # FIXME - could be cleaner to use domain.store_timestep etc.
            next_data_structure.store_connectivity()
            next_data_structure.store_timestep(names)
            fid.sync()
            fid.close()

            # restore the old starttime and filename
            self.domain.starttime = old_domain_starttime
            self.domain.set_name(old_domain_filename)
        else:
            self.recursion = False
            domain = self.domain

            # Get the variables
            time = fid.variables['time']
            stage = fid.variables['stage']
            xmomentum = fid.variables['xmomentum']
            ymomentum = fid.variables['ymomentum']
            i = len(time)
            if type(names) not in [types.ListType, types.TupleType]:
                names = [names]

            if 'stage' in names \
               and 'xmomentum' in names \
               and 'ymomentum' in names:
                # Get stage, elevation, depth and select only those
                # values where minimum_storable_height is exceeded
                Q = domain.quantities['stage']
                A, _ = Q.get_vertex_values(xy=False, precision=self.precision)
                z = fid.variables['elevation']

                storable_indices = (A-z[:] >= self.minimum_storable_height)
                stage = num.choose(storable_indices, (z[:], A))

                # Define a zero vector of same size and type as A
                # for use with momenta
                null = num.zeros(num.size(A), A.dtype.char) #??#

                # Get xmomentum where depth exceeds minimum_storable_height
                Q = domain.quantities['xmomentum']
                xmom, _ = Q.get_vertex_values(xy=False,
                                              precision=self.precision)
                xmomentum = num.choose(storable_indices, (null, xmom))


                # Get ymomentum where depth exceeds minimum_storable_height
                Q = domain.quantities['ymomentum']
                ymom, _ = Q.get_vertex_values(xy=False,
                                              precision=self.precision)
                ymomentum = num.choose(storable_indices, (null, ymom))

                # Write quantities to underlying data file
                self.writer.store_quantities(fid,
                                             time=self.domain.time,
                                             sww_precision=self.precision,
                                             stage=stage,
                                             xmomentum=xmomentum,
                                             ymomentum=ymomentum)
            else:
                msg = 'Quantities stored must be: stage, xmomentum, ymomentum, '
                msg += 'but I got: ' + str(names)
                raise Exception, msg

            # Update extrema if requested
            domain = self.domain
            if domain.quantities_to_be_monitored is not None:
                for q, info in domain.quantities_to_be_monitored.items():
                    if info['min'] is not None:
                        fid.variables[q + '.extrema'][0] = info['min']
                        fid.variables[q + '.min_location'][:] = \
                                        info['min_location']
                        fid.variables[q + '.min_time'][0] = info['min_time']

                    if info['max'] is not None:
                        fid.variables[q + '.extrema'][1] = info['max']
                        fid.variables[q + '.max_location'][:] = \
                                        info['max_location']
                        fid.variables[q + '.max_time'][0] = info['max_time']

            # Flush and close
            fid.sync()
            fid.close()


##
# @brief Class for handling checkpoint data
class Data_format_cpt(Data_format):
    """Interface to native NetCDF format (.cpt)
    """

    ##
    # @brief Initialize this instantiation.
    # @param domain The ANUGA domain to store checkpoints from.
    # @param mode Mode of underlying data file (default WRITE).
    def __init__(self, domain, mode=netcdf_mode_w):
        from Scientific.IO.NetCDF import NetCDFFile

        self.precision = netcdf_float # Use full precision

        Data_format.__init__(self, domain, 'cpt', mode)

        # NetCDF file definition
        fid = NetCDFFile(self.filename, mode)
        if mode[0] == 'w':
            # Create new file
            fid.institution = 'Geoscience Australia'
            fid.description = 'Checkpoint data'
            #fid.smooth = domain.smooth
            fid.order = domain.default_order

            # dimension definitions
            fid.createDimension('number_of_volumes', self.number_of_volumes)
            fid.createDimension('number_of_vertices', 3)

            # Store info at all vertices (no smoothing)
            fid.createDimension('number_of_points', 3*self.number_of_volumes)
            fid.createDimension('number_of_timesteps', None) # extensible

            # variable definitions

            # Mesh
            fid.createVariable('x', self.precision, ('number_of_points',))
            fid.createVariable('y', self.precision, ('number_of_points',))


            fid.createVariable('volumes', netcdf_int, ('number_of_volumes',
                                                       'number_of_vertices'))

            fid.createVariable('time', self.precision, ('number_of_timesteps',))

            # Allocate space for all quantities
            for name in domain.quantities.keys():
                fid.createVariable(name, self.precision,
                                   ('number_of_timesteps',
                                    'number_of_points'))

        fid.close()

    ##
    # @brief Store connectivity data to underlying data file.
    def store_checkpoint(self):
        """Write x,y coordinates and connectivity (volumes) of the
        triangles constituting the mesh, plus the bed elevation.
        """

        from Scientific.IO.NetCDF import NetCDFFile

        domain = self.domain

        # Get NetCDF
        fid = NetCDFFile(self.filename, netcdf_mode_a)

        # Get the variables
        x = fid.variables['x']
        y = fid.variables['y']
        z = fid.variables['elevation']    # FIXME: 'elevation' is dimensioned
                                          # per timestep in __init__; storing
                                          # a single snapshot here may need
                                          # review

        volumes = fid.variables['volumes']

        # Get X, Y and bed elevation Z
        Q = domain.quantities['elevation']
        X,Y,Z,V = Q.get_vertex_values(xy=True, precision=self.precision)

        x[:] = X.astype(self.precision)
        y[:] = Y.astype(self.precision)
        z[:] = Z.astype(self.precision)

        volumes[:] = V

        fid.close()

    ##
    # @brief Store time and named quantities to underlying data file.
    # @param name Name of the quantity to store.
    def store_timestep(self, name):
        """Store time and named quantity to file
        """

        from Scientific.IO.NetCDF import NetCDFFile
        from time import sleep

        # Get NetCDF
        retries = 0
        file_open = False
        while not file_open and retries < 10:
            try:
                fid = NetCDFFile(self.filename, netcdf_mode_a)
            except IOError:
                # This could happen if someone was reading the file.
                # In that case, wait a while and try again
                msg = 'Warning (store_timestep): File %s could not be opened' \
                      ' - trying again' % self.filename
                print msg
                retries += 1
                sleep(1)
            else:
                file_open = True

        if not file_open:
            msg = 'File %s could not be opened for append' % self.filename
            raise DataFileNotOpenError, msg

        domain = self.domain

        # Get the variables
        time = fid.variables['time']
        stage = fid.variables['stage']
        i = len(time)

        # Store stage
        time[i] = self.domain.time

        # Get quantity
        Q = domain.quantities[name]
        A,V = Q.get_vertex_values(xy=False, precision=self.precision)

        stage[i,:] = A.astype(self.precision)

        # Flush and close
        fid.sync()
        fid.close()


##
# @brief Class for National Exposure Database storage (NEXIS).

LAT_TITLE = 'LATITUDE'
LONG_TITLE = 'LONGITUDE'
X_TITLE = 'x'
Y_TITLE = 'y'

class Exposure_csv:

    ##
    # @brief Instantiate this instance.
    # @param file_name Name of underlying data file.
    # @param latitude_title Title of the latitude column in the CSV file.
    # @param longitude_title Title of the longitude column in the CSV file.
    # @param is_x_y_locations True if locations are given as x,y pairs.
    # @param x_title Title of the x column in the CSV file.
    # @param y_title Title of the y column in the CSV file.
    # @param refine_polygon Unused??
    # @param title_check_list List of titles that must be in the CSV file.
    def __init__(self, file_name, latitude_title=LAT_TITLE,
                 longitude_title=LONG_TITLE, is_x_y_locations=None,
                 x_title=X_TITLE, y_title=Y_TITLE,
                 refine_polygon=None, title_check_list=None):
        """
        This class is for handling the exposure csv file.
        It reads the file in and converts the lats and longs to a geospatial
        data object.
        Use the methods to read and write columns.

        The format of the csv files it reads is:
           The first row is a title row.
           Commas are the delimiters.
           Each column is a 'set' of data.

        Feel free to use/expand it to read other csv files.

        It is not for adding and deleting rows.

        Can geospatial handle string attributes? It's not made for them.
        Currently it can't load and save string attributes.

        So just use geospatial to hold the x, y and georef? Bad, since
        different attributes are in different structures.  Not so bad, the
        info to write if the .csv file is saved is in attribute_dic.

        The location info is in the geospatial attribute.
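
        Example (illustrative, for a hypothetical file 'exposure.csv'
        with LATITUDE and LONGITUDE columns):

            exposure = Exposure_csv('exposure.csv')
            lats = exposure.get_column(LAT_TITLE)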
        """

        self._file_name = file_name
        self._geospatial = None

        # self._attribute_dic is a dictionary.
        # The keys are the column titles.
        # The values are lists of column data.

        # self._title_index_dic is a dictionary.
        # The keys are the column titles.
        # The values are the index positions of file columns.
        self._attribute_dic, self._title_index_dic = \
            csv2dict(self._file_name, title_check_list=title_check_list)
        try:
            # Have code here that handles caps or lower
            lats = self._attribute_dic[latitude_title]
            longs = self._attribute_dic[longitude_title]
        except KeyError:
            # maybe a warning..
            # Let's see if this works..
            if is_x_y_locations is not False:
                is_x_y_locations = True
            pass
        else:
            self._geospatial = Geospatial_data(latitudes=lats,
                                               longitudes=longs)

        if is_x_y_locations is True:
            if self._geospatial is not None:
                pass # FIXME: raise an error?
            try:
                xs = self._attribute_dic[x_title]
                ys = self._attribute_dic[y_title]
                points = [[float(i), float(j)] for i, j in zip(xs, ys)]
            except KeyError:
                # maybe a warning..
                msg = "Could not find location information."
                raise TitleValueError, msg
            else:
                self._geospatial = Geospatial_data(data_points=points)

        # create a list of points that are in the refining_polygon
        # described by a list of indexes representing the points

    ##
    # @brief Create a comparison method.
    # @param self This object.
    # @param other The other object.
    # @return 0 if objects are 'the same'.
    def __cmp__(self, other):
        # check that 'other' is an instance of this class
        if isinstance(self, type(other)):
            result = cmp(self._attribute_dic, other._attribute_dic)
            if result != 0:
                return result

            # The order of the columns is important. Therefore..
            result = cmp(self._title_index_dic, other._title_index_dic)
            if result != 0:
                return result
            for self_ls, other_ls in map(None, self._attribute_dic,
                                         other._attribute_dic):
                result = cmp(self._attribute_dic[self_ls],
                             other._attribute_dic[other_ls])
                if result != 0:
                    return result
            return 0
        else:
            return 1

    ##
    # @brief Get a list of column values given a column name.
    # @param column_name The name of the column to get values from.
    # @param use_refind_polygon Unused??
    def get_column(self, column_name, use_refind_polygon=False):
        """
        Given a column name return a list of the column values.

        Note, the type of the values will be String!
        Do this to change a list of strings to a list of floats:
            time = [float(x) for x in time]

        Not implemented:
        if use_refind_polygon is True, only return values in the
        refined polygon
        """

        if not self._attribute_dic.has_key(column_name):
            msg = 'There is no column called %s!' % column_name
            raise TitleValueError, msg

        return self._attribute_dic[column_name]

    ##
    # @brief Do linear interpolation to get a value (not implemented).
    # @param value_column_name Name of the column to return a value from.
    # @param known_column_name Name of the column to interpolate on.
    # @param known_values Known values in known_column_name.
    # @param use_refind_polygon Unused??
    def get_value(self, value_column_name, known_column_name,
                  known_values, use_refind_polygon=False):
        """
        Do linear interpolation on the known_column_name column, using the
        known_values, to return a value of the value_column_name column.
        """

        pass

    ##
    # @brief Get a geospatial object that describes the locations.
    # @param use_refind_polygon Unused??
    def get_location(self, use_refind_polygon=False):
        """
        Return a geospatial object which describes the
        locations of the location file.

        Note, if there is no location info, this returns None.

        Not implemented:
        if use_refind_polygon is True, only return values in the
        refined polygon
        """

        return self._geospatial

    ##
    # @brief Add column to 'end' of CSV data.
    # @param column_name The new column name.
    # @param column_values The new column values.
    # @param overwrite If True, overwrites last column, doesn't add at end.
    def set_column(self, column_name, column_values, overwrite=False):
        """
        Add a column to the 'end' (with the right most column being the end)
        of the csv file.

        Set overwrite to True if you want to overwrite a column.

        Note, in column_name white space is removed and case is not checked.
        Precondition:
        The column_name and column_values cannot have commas in them.
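
        Example (illustrative):
            exposure.set_column('DEPTH', depth_values)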
        """

        # sanity checks
        value_row_count = \
                len(self._attribute_dic[self._title_index_dic.keys()[0]])
        if len(column_values) != value_row_count:
            msg = 'The number of column values must equal the number of rows.'
            raise DataMissingValuesError, msg

        # check new column name isn't already used, and we aren't overwriting
        if self._attribute_dic.has_key(column_name):
            if not overwrite:
                msg = 'Column name %s already in use!' % column_name
                raise TitleValueError, msg
        else:
            # New title.  Add it to the title index.
            self._title_index_dic[column_name] = len(self._title_index_dic)

        self._attribute_dic[column_name] = column_values

    ##
    # @brief Save the exposure CSV file.
    # @param file_name If supplied, use this filename, not original.
    def save(self, file_name=None):
        """
        Save the exposure csv file
        """

        if file_name is None:
            file_name = self._file_name

        fd = open(file_name, 'wb')
        writer = csv.writer(fd)

        # Write the title to a csv file
        line = [None] * len(self._title_index_dic)
        for title in self._title_index_dic.iterkeys():
            line[self._title_index_dic[title]] = title
        writer.writerow(line)

        # Write the values to a csv file
        value_row_count = \
                len(self._attribute_dic[self._title_index_dic.keys()[0]])
        for row_i in range(value_row_count):
            line = [None] * len(self._title_index_dic)
            for title in self._title_index_dic.iterkeys():
                line[self._title_index_dic[title]] = \
                     self._attribute_dic[title][row_i]
            writer.writerow(line)


def csv2building_polygons(file_name,
                          floor_height=3,
                          clipping_polygons=None):
    """
    Convert CSV files of the form:

    easting,northing,id,floors
    422664.22,870785.46,2,0
    422672.48,870780.14,2,0
    422668.17,870772.62,2,0
    422660.35,870777.17,2,0
    422664.22,870785.46,2,0
    422661.30,871215.06,3,1
    422667.50,871215.70,3,1
    422668.30,871204.86,3,1
    422662.21,871204.33,3,1
    422661.30,871215.06,3,1

    to a dictionary of polygons with id as key.
    The associated number of floors is converted to a height in metres
    and returned as a separate dictionary, also keyed by id.

    Optional parameter floor_height is the height of each building storey.
    Optional parameter clipping_polygons is a list of polygons selecting
    buildings. Any building not in these polygons will be omitted.

    See csv2polygons for more details
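
    Example (illustrative):
        polygons, heights = csv2building_polygons('buildings.csv',
                                                  floor_height=3)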
    """

    polygons, values = csv2polygons(file_name,
                                    value_name='floors',
                                    clipping_polygons=clipping_polygons)

    heights = {}
    for key in values.keys():
        v = float(values[key])
        heights[key] = v*floor_height

    return polygons, heights


##
# @brief Convert CSV file into a dictionary of polygons and associated values.
# @param file_name The path to the file to read.
# @param value_name Name of the 4th column (default 'value').
# @param clipping_polygons Optional list of polygons selecting polygons to keep.
def csv2polygons(file_name,
                 value_name='value',
                 clipping_polygons=None):
    """
    Convert CSV files of the form:

    easting,northing,id,value
    422664.22,870785.46,2,0
    422672.48,870780.14,2,0
    422668.17,870772.62,2,0
    422660.35,870777.17,2,0
    422664.22,870785.46,2,0
    422661.30,871215.06,3,1
    422667.50,871215.70,3,1
    422668.30,871204.86,3,1
    422662.21,871204.33,3,1
    422661.30,871215.06,3,1

    to a dictionary of polygons with id as key.
    The associated values are returned as a separate dictionary, also keyed
    by id.


    easting: x coordinate relative to zone implied by the model
    northing: y coordinate relative to zone implied by the model
    id: tag for polygon comprising points with this tag
    value: numeral associated with each polygon. These must be the same for
           all points in each polygon.

    The last header, value, can take on other names such as roughness,
    floors, etc - or it can be omitted, in which case the returned values
    will be None.

    Eastings and Northings will be returned as floating point values while
    id and values will be returned as strings.

    Optional argument clipping_polygons will select only those polygons that
    are fully within one or more of the clipping_polygons. In other words,
    any polygon from the csv file which has at least one point not inside
    one of the clipping polygons will be excluded.

    See underlying function csv2dict for more details.
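
    Example (illustrative):
        polygons, values = csv2polygons('polygons.csv', value_name='floors')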
    """

    X, _ = csv2dict(file_name)

    msg = 'Polygon csv file must have 3 or 4 columns'
    assert len(X.keys()) in [3, 4], msg

    msg = 'Did not find expected column header: easting'
    assert 'easting' in X.keys(), msg

    msg = 'Did not find expected column header: northing'
    assert 'northing' in X.keys(), msg

    msg = 'Did not find expected column header: id'
    assert 'id' in X.keys(), msg

    if value_name is not None:
        msg = 'Did not find expected column header: %s' % value_name
        assert value_name in X.keys(), msg

    polygons = {}
    if len(X.keys()) == 4:
        values = {}
    else:
        values = None

    # Loop through entries and compose polygons
    excluded_polygons = {}
    past_ids = {}
    last_id = None
    for i, id in enumerate(X['id']):

        # Check for duplicate polygons
        if id in past_ids:
            msg = 'Polygon %s was duplicated in line %d' % (id, i)
            raise Exception, msg

        if id not in polygons:
            # Start new polygon
            polygons[id] = []
            if values is not None:
                values[id] = X[value_name][i]

            # Keep track of previous polygon ids
            if last_id is not None:
                past_ids[last_id] = i

        # Append this point to current polygon
        point = [float(X['easting'][i]), float(X['northing'][i])]

        if clipping_polygons is not None:
            exclude = True
            for clipping_polygon in clipping_polygons:
                if inside_polygon(point, clipping_polygon):
                    exclude = False
                    break

            if exclude is True:
                excluded_polygons[id] = True

        polygons[id].append(point)

        # Check that value is the same across each polygon
        if values is not None:
            msg = 'Values must be the same across each polygon. '
            msg += 'I got %s in line %d but it should have been %s' \
                   % (X[value_name][i], i, values[id])
            assert values[id] == X[value_name][i], msg

        last_id = id

    # Weed out polygons that were not wholly inside clipping polygons
    for id in excluded_polygons:
        del polygons[id]

    return polygons, values



##
# @brief Convert CSV file to a dictionary of arrays.
# @param file_name The path to the file to read.
def csv2array(file_name):
    """
    Convert CSV files of the form:

    time, discharge, velocity
    0.0,  1.2,       0.0
    0.1,  3.2,       1.1
    ...

    to a dictionary of numeric arrays.


    See underlying function csv2dict for more details.
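
    Example (illustrative):
        data = csv2array('hydrograph.csv')
        discharge = data['discharge']    # numeric array of floats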
    """

    X, _ = csv2dict(file_name)

    Y = {}
    for key in X.keys():
        Y[key] = num.array([float(x) for x in X[key]])

    return Y


##
# @brief Read a CSV file and convert to a dictionary of {key: column}.
# @param file_name The path to the file to read.
# @param title_check_list List of titles that *must* be columns in the file.
# @return Two dicts: ({key:column}, {title:index}).
# @note WARNING: Values are returned as strings.
def csv2dict(file_name, title_check_list=None):
    """
    Load in the csv as a dictionary, title as key and column info as value.
    Also, create a dictionary, title as key and column index as value,
    to keep track of the column order.

    Two dictionaries are returned.

    WARNING: Values are returned as strings.
             Do this to change a list of strings to a list of floats:
                 time = [float(x) for x in time]
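
    Example: a hypothetical file containing
        time,stage
        0.0,1.2
    returns ({'time': ['0.0'], 'stage': ['1.2']}, {'time': 0, 'stage': 1})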
    """

    # FIXME(Ole): Consider dealing with files without headers
    # FIXME(Ole): Consider a wrapper automatically converting text fields
    #             to the right type by trying for: int, float, string

    attribute_dic = {}
    title_index_dic = {}
    titles_stripped = [] # List of titles

    reader = csv.reader(file(file_name))

    # Read in and manipulate the title info
    titles = reader.next()
    for i, title in enumerate(titles):
        header = title.strip()
        titles_stripped.append(header)
        title_index_dic[header] = i
    title_count = len(titles_stripped)

    # Check required columns
    if title_check_list is not None:
        for title_check in title_check_list:
            if not title_index_dic.has_key(title_check):
                msg = 'Reading error. This column is not present: %s' \
                      % title_check
                raise IOError, msg

    # Create a dictionary of column values, indexed by column title
    for line in reader:
        n = len(line) # Number of entries
        if n != title_count:
            msg = 'Entry in file %s had %d columns ' % (file_name, n)
            msg += 'although there were %d headers' % title_count
            raise IOError, msg
        for i, value in enumerate(line):
            attribute_dic.setdefault(titles_stripped[i], []).append(value)

    return attribute_dic, title_index_dic


##
# @brief Store x,y,z vectors into an OBJ file.
# @param filename Name of the output file (with or without '.obj').
# @param x
# @param y
# @param z
def write_obj(filename, x, y, z):
    """Store x,y,z vectors into filename (obj format).

       Vectors are assumed to have dimension (M,3) where
       M corresponds to the number of elements.
       Triangles are assumed to be disconnected.

       The three numbers in each vector correspond to three vertices,

       e.g. the x coordinate of vertex 1 of element i is in x[i,1]
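
       Example (illustrative): store two disconnected triangles

           x = y = z = num.zeros((2,3), num.float)
           write_obj('two_triangles', x, y, z)    # creates two_triangles.obj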
    """

    import os.path

    root, ext = os.path.splitext(filename)
    if ext == '.obj':
        FN = filename
    else:
        FN = filename + '.obj'

    outfile = open(FN, 'wb')
    outfile.write("# Triangulation as an obj file\n")

    M, N = x.shape
    assert N == 3  # Assuming three vertices per element

    for i in range(M):
        for j in range(N):
            outfile.write("v %f %f %f\n" % (x[i,j], y[i,j], z[i,j]))

    for i in range(M):
        base = i * N
        outfile.write("f %d %d %d\n" % (base+1, base+2, base+3))

    outfile.close()


#########################################################
# Conversion routines
########################################################

##
# @brief Convert SWW data to OBJ data.
# @param basefilename Stem of filename, needs size and extension added.
# @param size The number of lines to write.
def sww2obj(basefilename, size):
    """Convert netcdf based data output to obj
    """

    from Scientific.IO.NetCDF import NetCDFFile

    # Get NetCDF
    FN = create_filename('.', basefilename, 'sww', size)
    print 'Reading from ', FN
    fid = NetCDFFile(FN, netcdf_mode_r)  # Open existing file for read

    # Get the variables
    x = fid.variables['x']
    y = fid.variables['y']
    z = fid.variables['elevation']
    time = fid.variables['time']
    stage = fid.variables['stage']

    M = size  # Number of lines
    xx = num.zeros((M,3), num.float)
    yy = num.zeros((M,3), num.float)
    zz = num.zeros((M,3), num.float)

    for i in range(M):
        for j in range(3):
            xx[i,j] = x[i+j*M]
            yy[i,j] = y[i+j*M]
            zz[i,j] = z[i+j*M]

    # Write obj for bathymetry
    FN = create_filename('.', basefilename, 'obj', size)
    write_obj(FN, xx, yy, zz)

    # Now read all the data with variable information, combine with
    # x,y info and store as obj
    for k in range(len(time)):
        t = time[k]
        print 'Processing timestep %f' % t

        for i in range(M):
            for j in range(3):
                zz[i,j] = stage[k,i+j*M]

        # Write obj for variable data
        #FN = create_filename(basefilename, 'obj', size, time=t)
        FN = create_filename('.', basefilename[:5], 'obj', size, time=t)
        write_obj(FN, xx, yy, zz)


##
# @brief Convert line based data output to OBJ data.
# @param basefilename Stem of filename, needs size and extension added.
def dat2obj(basefilename):
    """Convert line based data output to obj
    FIXME: Obsolete?
    """

    import glob, os
    from anuga.config import data_dir

    # Get bathymetry and x,y's
    lines = open(data_dir+os.sep+basefilename+'_geometry.dat', 'r').readlines()

    M = len(lines)  # Number of lines
    x = num.zeros((M,3), num.float)
    y = num.zeros((M,3), num.float)
    z = num.zeros((M,3), num.float)

    for i, line in enumerate(lines):
        tokens = line.split()
        values = map(float, tokens)

        for j in range(3):
            x[i,j] = values[j*3]
            y[i,j] = values[j*3+1]
            z[i,j] = values[j*3+2]

    # Write obj for bathymetry
    write_obj(data_dir + os.sep + basefilename + '_geometry', x, y, z)

    # Now read all the data files with variable information, combine with
    # x,y info and store as obj.

    files = glob.glob(data_dir + os.sep + basefilename + '*.dat')
    for filename in files:
        print 'Processing %s' % filename

        # Note: filenames returned by glob already include data_dir
        lines = open(filename, 'r').readlines()
        assert len(lines) == M
        root, ext = os.path.splitext(filename)

        # Get time from filename
        i0 = filename.find('_time=')
        if i0 == -1:
            # Skip bathymetry file
            continue

        i0 += 6  # Position where time starts
        i1 = filename.find('.dat')

        if i1 > i0:
            t = float(filename[i0:i1])
        else:
            raise DataTimeError, 'Could not extract time from filename %s' \
                                 % filename

        for i, line in enumerate(lines):
            tokens = line.split()
            values = map(float, tokens)

            for j in range(3):
                z[i,j] = values[j]

        # Write obj for variable data
        write_obj(data_dir + os.sep + basefilename + '_time=%.4f' % t, x, y, z)


##
# @brief Filter data file, selecting timesteps first:step:last.
# @param filename1 Data file to filter.
# @param filename2 File to write filtered timesteps to.
# @param first First timestep.
# @param last Last timestep.
# @param step Timestep stride.
def filter_netcdf(filename1, filename2, first=0, last=None, step=1):
    """Filter data file, selecting timesteps first:step:last.

    Read NetCDF filename1, pick timesteps first:step:last and save to
    NetCDF file filename2
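
    Example (illustrative):
        filter_netcdf('run.sww', 'run_coarse.sww', first=0, step=2)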
    """

    from Scientific.IO.NetCDF import NetCDFFile

    # Get NetCDF
    infile = NetCDFFile(filename1, netcdf_mode_r)  # Open existing file for read
    outfile = NetCDFFile(filename2, netcdf_mode_w)  # Open new file

    # Copy dimensions
    for d in infile.dimensions:
        outfile.createDimension(d, infile.dimensions[d])

    # Copy variable definitions
    for name in infile.variables:
        var = infile.variables[name]
        outfile.createVariable(name, var.dtype.char, var.dimensions)    #??#

    # Copy the static variables
    for name in infile.variables:
        if name == 'time' or name == 'stage':
            pass
        else:
            outfile.variables[name][:] = infile.variables[name][:]

    # Copy selected timesteps
    time = infile.variables['time']
    stage = infile.variables['stage']

    newtime = outfile.variables['time']
    newstage = outfile.variables['stage']

    if last is None:
        last = len(time)

    selection = range(first, last, step)
    for i, j in enumerate(selection):
        print 'Copying timestep %d of %d (%f)' % (j, last-first, time[j])
        newtime[i] = time[j]
        newstage[i,:] = stage[j,:]

    # Close
    infile.close()
    outfile.close()


##
# @brief Return instance of class of given format using filename.
# @param domain Data domain (eg, 'sww', etc).
# @param mode The mode to open domain in.
# @return A class instance of required domain and mode.
# Get data objects
def get_dataobject(domain, mode=netcdf_mode_w):
    """Return instance of class of given format using filename
    """

    cls = eval('Data_format_%s' % domain.format)
    return cls(domain, mode)
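
# Example (illustrative): for a domain whose 'format' attribute is 'sww',
# get_dataobject(domain) is equivalent to Data_format_sww(domain, mode).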


##
# @brief Convert DEM data to PTS data.
# @param basename_in Stem of input filename.
# @param basename_out Stem of output filename.
# @param easting_min
# @param easting_max
# @param northing_min
# @param northing_max
# @param use_cache
# @param verbose
# @return
def dem2pts(basename_in, basename_out=None,
            easting_min=None, easting_max=None,
            northing_min=None, northing_max=None,
            use_cache=False, verbose=False):
    """Read Digital Elevation model from the following NetCDF format (.dem)

    Example:

    ncols         3121
    nrows         1800
    xllcorner     722000
    yllcorner     5893000
    cellsize      25
    NODATA_value  -9999
    138.3698 137.4194 136.5062 135.5558 ..........

    Convert to NetCDF pts format which is

    points:  (Nx2) float array
    elevation: N float array
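
    Example (illustrative):
        dem2pts('topography', easting_min=722000, easting_max=730000,
                northing_min=5893000, northing_max=5900000)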
1510    """
1511
1512    kwargs = {'basename_out': basename_out,
1513              'easting_min': easting_min,
1514              'easting_max': easting_max,
1515              'northing_min': northing_min,
1516              'northing_max': northing_max,
1517              'verbose': verbose}
1518
1519    if use_cache is True:
1520        from caching import cache
1521        result = cache(_dem2pts, basename_in, kwargs,
1522                       dependencies = [basename_in + '.dem'],
1523                       verbose = verbose)
1524
1525    else:
1526        result = apply(_dem2pts, [basename_in], kwargs)
1527
1528    return result
1529
1530
1531##
1532# @brief
1533# @param basename_in
1534# @param basename_out
1535# @param verbose
1536# @param easting_min
1537# @param easting_max
1538# @param northing_min
1539# @param northing_max
1540def _dem2pts(basename_in, basename_out=None, verbose=False,
1541            easting_min=None, easting_max=None,
1542            northing_min=None, northing_max=None):
1543    """Read Digitial Elevation model from the following NetCDF format (.dem)
1544
1545    Internal function. See public function dem2pts for details.
1546    """
1547
1548    # FIXME: Can this be written feasibly using write_pts?
1549
1550    import os
1551    from Scientific.IO.NetCDF import NetCDFFile
1552
1553    root = basename_in
1554
1555    # Get NetCDF
1556    infile = NetCDFFile(root + '.dem', netcdf_mode_r) 
1557
1558    if verbose: print 'Reading DEM from %s' %(root + '.dem')
1559
1560    ncols = infile.ncols[0]
1561    nrows = infile.nrows[0]
1562    xllcorner = infile.xllcorner[0]  # Easting of lower left corner
1563    yllcorner = infile.yllcorner[0]  # Northing of lower left corner
1564    cellsize = infile.cellsize[0]
1565    NODATA_value = infile.NODATA_value[0]
1566    dem_elevation = infile.variables['elevation']
1567
1568    zone = infile.zone[0]
1569    false_easting = infile.false_easting[0]
1570    false_northing = infile.false_northing[0]
1571
1572    # Text strings
1573    projection = infile.projection
1574    datum = infile.datum
1575    units = infile.units
1576
1577    # Get output file
    if basename_out is None:
1579        ptsname = root + '.pts'
1580    else:
1581        ptsname = basename_out + '.pts'
1582
1583    if verbose: print 'Store to NetCDF file %s' %ptsname
1584
1585    # NetCDF file definition
1586    outfile = NetCDFFile(ptsname, netcdf_mode_w)
1587
1588    # Create new file
1589    outfile.institution = 'Geoscience Australia'
1590    outfile.description = 'NetCDF pts format for compact and portable ' \
1591                          'storage of spatial point data'
1592
1593    # Assign default values
1594    if easting_min is None: easting_min = xllcorner
1595    if easting_max is None: easting_max = xllcorner + ncols*cellsize
1596    if northing_min is None: northing_min = yllcorner
1597    if northing_max is None: northing_max = yllcorner + nrows*cellsize
1598
1599    # Compute offsets to update georeferencing
1600    easting_offset = xllcorner - easting_min
1601    northing_offset = yllcorner - northing_min
1602
1603    # Georeferencing
1604    outfile.zone = zone
1605    outfile.xllcorner = easting_min # Easting of lower left corner
1606    outfile.yllcorner = northing_min # Northing of lower left corner
1607    outfile.false_easting = false_easting
1608    outfile.false_northing = false_northing
1609
1610    outfile.projection = projection
1611    outfile.datum = datum
1612    outfile.units = units
1613
1614    # Grid info (FIXME: probably not going to be used, but heck)
1615    outfile.ncols = ncols
1616    outfile.nrows = nrows
1617
1618    dem_elevation_r = num.reshape(dem_elevation, (nrows, ncols))
1619    totalnopoints = nrows*ncols
1620
1621    # Calculating number of NODATA_values for each row in clipped region
    # FIXME: use array operations to do this faster
1623    nn = 0
1624    k = 0
1625    i1_0 = 0
1626    j1_0 = 0
1627    thisj = 0
1628    thisi = 0
1629    for i in range(nrows):
1630        y = (nrows-i-1)*cellsize + yllcorner
1631        for j in range(ncols):
1632            x = j*cellsize + xllcorner
1633            if easting_min <= x <= easting_max \
1634               and northing_min <= y <= northing_max:
1635                thisj = j
1636                thisi = i
1637                if dem_elevation_r[i,j] == NODATA_value:
1638                    nn += 1
1639
1640                if k == 0:
1641                    i1_0 = i
1642                    j1_0 = j
1643
1644                k += 1
1645
1646    index1 = j1_0
1647    index2 = thisj
1648
1649    # Dimension definitions
1650    nrows_in_bounding_box = int(round((northing_max-northing_min)/cellsize))
1651    ncols_in_bounding_box = int(round((easting_max-easting_min)/cellsize))
1652
1653    clippednopoints = (thisi+1-i1_0)*(thisj+1-j1_0)
1654    nopoints = clippednopoints-nn
1655
1656    clipped_dem_elev = dem_elevation_r[i1_0:thisi+1,j1_0:thisj+1]
1657
1658    if verbose:
1659        print 'There are %d values in the elevation' %totalnopoints
1660        print 'There are %d values in the clipped elevation' %clippednopoints
1661        print 'There are %d NODATA_values in the clipped elevation' %nn
1662
1663    outfile.createDimension('number_of_points', nopoints)
1664    outfile.createDimension('number_of_dimensions', 2) #This is 2d data
1665
1666    # Variable definitions
1667    outfile.createVariable('points', netcdf_float, ('number_of_points',
1668                                                    'number_of_dimensions'))
1669    outfile.createVariable('elevation', netcdf_float, ('number_of_points',))
1670
1671    # Get handles to the variables
1672    points = outfile.variables['points']
1673    elevation = outfile.variables['elevation']
1674
1675    lenv = index2-index1+1
1676
1677    # Store data
1678    global_index = 0
1679    # for i in range(nrows):
1680    for i in range(i1_0, thisi+1, 1):
1681        if verbose and i % ((nrows+10)/10) == 0:
1682            print 'Processing row %d of %d' % (i, nrows)
1683
1684        lower_index = global_index
1685
1686        v = dem_elevation_r[i,index1:index2+1]
1687        no_NODATA = num.sum(v == NODATA_value)
1688        if no_NODATA > 0:
1689            newcols = lenv - no_NODATA  # ncols_in_bounding_box - no_NODATA
1690        else:
1691            newcols = lenv              # ncols_in_bounding_box
1692
1693        telev = num.zeros(newcols, num.float)
1694        tpoints = num.zeros((newcols, 2), num.float)
1695
1696        local_index = 0
1697
1698        y = (nrows-i-1)*cellsize + yllcorner
1699        #for j in range(ncols):
1700        for j in range(j1_0,index2+1,1):
1701            x = j*cellsize + xllcorner
1702            if easting_min <= x <= easting_max \
1703               and northing_min <= y <= northing_max \
               and dem_elevation_r[i,j] != NODATA_value:
1705                tpoints[local_index, :] = [x-easting_min, y-northing_min]
1706                telev[local_index] = dem_elevation_r[i, j]
1707                global_index += 1
1708                local_index += 1
1709
1710        upper_index = global_index
1711
1712        if upper_index == lower_index + newcols:
1713            points[lower_index:upper_index, :] = tpoints
1714            elevation[lower_index:upper_index] = telev
1715
1716    assert global_index == nopoints, 'index not equal to number of points'
1717
1718    infile.close()
1719    outfile.close()
1720
1721
1722##
# @brief Return block of surface lines for each cross section.
# @param lines Iterable of text lines to process.
1725# @note BROKEN?  UNUSED?
1726def _read_hecras_cross_sections(lines):
1727    """Return block of surface lines for each cross section
1728    Starts with SURFACE LINE,
1729    Ends with END CROSS-SECTION
1730    """
1731
1732    points = []
1733
1734    reading_surface = False
1735    for i, line in enumerate(lines):
1736        if len(line.strip()) == 0:    #Ignore blanks
1737            continue
1738
1739        if lines[i].strip().startswith('SURFACE LINE'):
1740            reading_surface = True
1741            continue
1742
1743        if lines[i].strip().startswith('END') and reading_surface:
1744            yield points
1745            reading_surface = False
1746            points = []
1747
1748        if reading_surface:
1749            fields = line.strip().split(',')
1750            easting = float(fields[0])
1751            northing = float(fields[1])
1752            elevation = float(fields[2])
1753            points.append([easting, northing, elevation])
1754
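# Example (sketch; 'lines' would normally come from a HEC-RAS .sdf file):
#
#     lines = ['SURFACE LINE:',
#              ' 405548.67,   6438142.76,   35.37',
#              ' 405552.24,   6438146.28,   35.41',
#              'END CROSS-SECTION']
#     for section in _read_hecras_cross_sections(lines):
#         print section
#     # [[405548.67, 6438142.76, 35.37], [405552.24, 6438146.28, 35.41]]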
1755
1756##
1757# @brief Convert HECRAS (.sdf) file to PTS file.
# @param basename_in Stem of input filename.
# @param basename_out Stem of output filename.
1760# @param verbose True if this function is to be verbose.
1761def hecras_cross_sections2pts(basename_in,
1762                              basename_out=None,
1763                              verbose=False):
    """Read HEC-RAS elevation data from the following ASCII format (.sdf)
1765
1766    Example:
1767
1768# RAS export file created on Mon 15Aug2005 11:42
1769# by HEC-RAS Version 3.1.1
1770
1771BEGIN HEADER:
1772  UNITS: METRIC
1773  DTM TYPE: TIN
1774  DTM: v:\1\cit\perth_topo\river_tin
1775  STREAM LAYER: c:\local\hecras\21_02_03\up_canning_cent3d.shp
1776  CROSS-SECTION LAYER: c:\local\hecras\21_02_03\up_can_xs3d.shp
1777  MAP PROJECTION: UTM
1778  PROJECTION ZONE: 50
1779  DATUM: AGD66
1780  VERTICAL DATUM:
1781  NUMBER OF REACHES:  19
1782  NUMBER OF CROSS-SECTIONS:  14206
1783END HEADER:
1784
1785Only the SURFACE LINE data of the following form will be utilised
1786  CROSS-SECTION:
1787    STREAM ID:Southern-Wungong
1788    REACH ID:Southern-Wungong
1789    STATION:19040.*
1790    CUT LINE:
1791      405548.671603161 , 6438142.7594925
1792      405734.536092045 , 6438326.10404912
1793      405745.130459356 , 6438331.48627354
1794      405813.89633823 , 6438368.6272789
1795    SURFACE LINE:
1796     405548.67,   6438142.76,   35.37
1797     405552.24,   6438146.28,   35.41
1798     405554.78,   6438148.78,   35.44
1799     405555.80,   6438149.79,   35.44
1800     405559.37,   6438153.31,   35.45
1801     405560.88,   6438154.81,   35.44
1802     405562.93,   6438156.83,   35.42
1803     405566.50,   6438160.35,   35.38
1804     405566.99,   6438160.83,   35.37
1805     ...
1806   END CROSS-SECTION
1807
1808    Convert to NetCDF pts format which is
1809
1810    points:  (Nx2) float array
1811    elevation: N float array
1812    """
1813
1814    import os
1815    from Scientific.IO.NetCDF import NetCDFFile
1816
1817    root = basename_in
1818
1819    # Get ASCII file
1820    infile = open(root + '.sdf', 'r')
1821
1822    if verbose: print 'Reading DEM from %s' %(root + '.sdf')
1823
1824    lines = infile.readlines()
1825    infile.close()
1826
1827    if verbose: print 'Converting to pts format'
1828
1829    # Scan through the header, picking up stuff we need.
1830    i = 0
1831    while lines[i].strip() == '' or lines[i].strip().startswith('#'):
1832        i += 1
1833
1834    assert lines[i].strip().upper() == 'BEGIN HEADER:'
1835    i += 1
1836
1837    assert lines[i].strip().upper().startswith('UNITS:')
1838    units = lines[i].strip().split()[1]
1839    i += 1
1840
1841    assert lines[i].strip().upper().startswith('DTM TYPE:')
1842    i += 1
1843
1844    assert lines[i].strip().upper().startswith('DTM:')
1845    i += 1
1846
1847    assert lines[i].strip().upper().startswith('STREAM')
1848    i += 1
1849
1850    assert lines[i].strip().upper().startswith('CROSS')
1851    i += 1
1852
1853    assert lines[i].strip().upper().startswith('MAP PROJECTION:')
1854    projection = lines[i].strip().split(':')[1]
1855    i += 1
1856
1857    assert lines[i].strip().upper().startswith('PROJECTION ZONE:')
1858    zone = int(lines[i].strip().split(':')[1])
1859    i += 1
1860
1861    assert lines[i].strip().upper().startswith('DATUM:')
1862    datum = lines[i].strip().split(':')[1]
1863    i += 1
1864
1865    assert lines[i].strip().upper().startswith('VERTICAL DATUM:')
1866    i += 1
1867
1868    assert lines[i].strip().upper().startswith('NUMBER OF REACHES:')
1869    i += 1
1870
1871    assert lines[i].strip().upper().startswith('NUMBER OF CROSS-SECTIONS:')
1872    number_of_cross_sections = int(lines[i].strip().split(':')[1])
1873    i += 1
1874
1875    # Now read all points
1876    points = []
1877    elevation = []
1878    for j, entries in enumerate(_read_hecras_cross_sections(lines[i:])):
1879        for k, entry in enumerate(entries):
1880            points.append(entry[:2])
1881            elevation.append(entry[2])
1882
    msg = 'Actual number of cross sections == %d, reported as %d' \
          % (j+1, number_of_cross_sections)
1885    assert j+1 == number_of_cross_sections, msg
1886
1887    # Get output file, write PTS data
    if basename_out is None:
1889        ptsname = root + '.pts'
1890    else:
1891        ptsname = basename_out + '.pts'
1892
1893    geo_ref = Geo_reference(zone, 0, 0, datum, projection, units)
1894    geo = Geospatial_data(points, {"elevation":elevation},
1895                          verbose=verbose, geo_reference=geo_ref)
1896    geo.export_points_file(ptsname)
1897
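# Example (sketch; assumes a file 'perth.sdf' exported from HEC-RAS exists):
#
#     hecras_cross_sections2pts('perth', basename_out='perth_xs', verbose=True)
#
# This would read the SURFACE LINE data from 'perth.sdf' and write the
# cross-section surface points and elevations to 'perth_xs.pts'.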
1898
1899##
# @brief Export quantities from one or more SWW files to grid (DEM) files.
# @param basename_in Stem of input filename (may match several sww files).
# @param extra_name_out Extra string to append to output filenames.
# @param quantities List of quantities to export (defaults to ['elevation']).
# @param timestep If given, export quantities at that timestep only.
# @param reduction Function used to reduce quantities over all timesteps.
# @param cellsize Grid cell size in meters (default 10).
# @param number_of_decimal_places Number of decimal places written to output.
# @param NODATA_value The NODATA value (default -9999).
# @param easting_min Minimum easting of the output grid.
# @param easting_max Maximum easting of the output grid.
# @param northing_min Minimum northing of the output grid.
# @param northing_max Maximum northing of the output grid.
# @param verbose True if this function is to be verbose.
# @param origin Tuple (zone, easting, northing) overriding the SWW georeference.
# @param datum The datum to use (default 'WGS84').
# @param format Output format, either 'ers' (default) or 'asc'.
# @return List of basenames of the files produced.
1918def export_grid(basename_in, extra_name_out=None,
1919                quantities=None, # defaults to elevation
1920                timestep=None,
1921                reduction=None,
1922                cellsize=10,
1923                number_of_decimal_places=None,
1924                NODATA_value=-9999,
1925                easting_min=None,
1926                easting_max=None,
1927                northing_min=None,
1928                northing_max=None,
1929                verbose=False,
1930                origin=None,
1931                datum='WGS84',
1932                format='ers'):
1933    """Wrapper for sww2dem.
1934    See sww2dem to find out what most of the parameters do.
1935
1936    Quantities is a list of quantities.  Each quantity will be
1937    calculated for each sww file.
1938
    Returns the basenames of the files produced, each made up of the
    directory and the file name, without the extension.

    One output file is produced per quantity for each input sww file.
1945    """
1946
1947    if quantities is None:
1948        quantities = ['elevation']
1949
    if isinstance(quantities, str):
        quantities = [quantities]
1952
1953    # How many sww files are there?
1954    dir, base = os.path.split(basename_in)
1955
1956    iterate_over = get_all_swwfiles(dir, base, verbose)
1957
1958    if dir == "":
1959        dir = "." # Unix compatibility
1960
1961    files_out = []
1962    for sww_file in iterate_over:
1963        for quantity in quantities:
1964            if extra_name_out is None:
1965                basename_out = sww_file + '_' + quantity
1966            else:
1967                basename_out = sww_file + '_' + quantity + '_' + extra_name_out
1968
1969            file_out = sww2dem(dir+sep+sww_file, dir+sep+basename_out,
1970                               quantity,
1971                               timestep,
1972                               reduction,
1973                               cellsize,
1974                               number_of_decimal_places,
1975                               NODATA_value,
1976                               easting_min,
1977                               easting_max,
1978                               northing_min,
1979                               northing_max,
1980                               verbose,
1981                               origin,
1982                               datum,
1983                               format)
1984            files_out.append(file_out)
1985    return files_out
1986
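# Example (sketch; assumes 'scenario.sww' exists in the current directory):
#
#     files = export_grid('scenario',
#                         quantities=['elevation', 'stage'],
#                         cellsize=25,
#                         format='asc',
#                         verbose=True)
#
# One grid is written per quantity per sww file, e.g. 'scenario_elevation.asc'
# and 'scenario_stage.asc'; the returned list holds the corresponding basenames.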
1987
1988##
# @brief Generate time series reports/figures from a set of SWW files.
# @param production_dirs Dictionary of production directories, keyed by label.
# @param output_dir Directory containing the model output sww files.
# @param scenario_name Name of the scenario, used to locate sww files.
# @param gauges_dir_name Name of the gauges file.
# @param plot_quantity List of quantities to plot.
# @param generate_fig True if figures are to be generated.
# @param reportname Stem of the report filename, if a report is wanted.
# @param surface True if a surface plot is wanted.
# @param time_min Beginning of the time window to consider.
# @param time_max End of the time window to consider.
# @param title_on True if titles are to be shown on the figures.
# @param verbose True if this function is to be verbose.
# @param nodes Number of processes used (None means a sequential run).
2003def get_timeseries(production_dirs, output_dir, scenario_name, gauges_dir_name,
2004                   plot_quantity, generate_fig=False,
2005                   reportname=None, surface=False, time_min=None,
2006                   time_max=None, title_on=False, verbose=True,
2007                   nodes=None):
2008    """
2009    nodes - number of processes used.
2010
    Warning: this function has no tests.
2012    """
2013
    report = reportname is not None

    is_parallel = nodes is not None
2023
2024    # Generate figures
2025    swwfiles = {}
    if is_parallel is True:
        base_reportname = reportname
        for i in range(nodes):
            print 'Sending node %d of %d' % (i, nodes)
            swwfiles = {}
            if base_reportname is not None:
                reportname = base_reportname + '_%s' % i
2032            for label_id in production_dirs.keys():
2033                if label_id == 'boundaries':
2034                    swwfile = best_boundary_sww
2035                else:
2036                    file_loc = output_dir + label_id + sep
2037                    sww_extra = '_P%s_%s' % (i, nodes)
2038                    swwfile = file_loc + scenario_name + sww_extra + '.sww'
2039                    print 'swwfile', swwfile
                swwfiles[swwfile] = label_id
2041
            texname, elev_output = sww2timeseries(swwfiles,
                                                  gauges_dir_name,
                                                  production_dirs,
                                                  report=report,
                                                  reportname=reportname,
                                                  plot_quantity=plot_quantity,
                                                  generate_fig=generate_fig,
                                                  surface=surface,
                                                  time_min=time_min,
                                                  time_max=time_max,
                                                  title_on=title_on,
                                                  verbose=verbose)
2054    else:
2055        for label_id in production_dirs.keys():
2056            if label_id == 'boundaries':
2057                print 'boundaries'
2058                file_loc = project.boundaries_in_dir
2059                swwfile = project.boundaries_dir_name3 + '.sww'
2060                #  swwfile = boundary_dir_filename
2061            else:
2062                file_loc = output_dir + label_id + sep
2063                swwfile = file_loc + scenario_name + '.sww'
2064            swwfiles[swwfile] = label_id
2065
2066        texname, elev_output = sww2timeseries(swwfiles,
2067                                              gauges_dir_name,
2068                                              production_dirs,
2069                                              report=report,
2070                                              reportname=reportname,
2071                                              plot_quantity=plot_quantity,
2072                                              generate_fig=generate_fig,
2073                                              surface=surface,
2074                                              time_min=time_min,
2075                                              time_max=time_max,
2076                                              title_on=title_on,
2077                                              verbose=verbose)
2078
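# Example (illustrative sketch only -- this function has no tests and, in the
# 'boundaries' case, relies on module-level names such as 'project'; the
# directory layout below is hypothetical):
#
#     production_dirs = {'run_1': 'First run'}
#     get_timeseries(production_dirs,
#                    output_dir='outputs' + sep,
#                    scenario_name='scenario',
#                    gauges_dir_name='gauges.csv',
#                    plot_quantity=['stage'],
#                    generate_fig=False)
#
# This would look for 'outputs/run_1/scenario.sww' and produce time series
# at the gauge locations via sww2timeseries.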
2079
2080##
2081# @brief Convert SWW file to DEM file (.asc or .ers).
# @param basename_in Stem of input filename.
# @param basename_out Stem of output filename.
# @param quantity Name or expression of the quantity to export (default 'elevation').
# @param timestep If given, export quantity at that timestep only.
# @param reduction Function used to reduce the quantity over all timesteps.
# @param cellsize Grid cell size in meters (default 10).
# @param number_of_decimal_places Number of decimal places written to output.
# @param NODATA_value The NODATA value (default -9999).
# @param easting_min Minimum easting of the output grid.
# @param easting_max Maximum easting of the output grid.
# @param northing_min Minimum northing of the output grid.
# @param northing_max Maximum northing of the output grid.
# @param verbose True if this function is to be verbose.
# @param origin Tuple (zone, easting, northing) overriding the SWW georeference.
# @param datum The datum to use (default 'WGS84').
# @param format Output format, either 'ers' (default) or 'asc'.
# @param block_size Number of slices along the non-time axis processed per block.
# @return Basename of the file produced.
2099def sww2dem(basename_in, basename_out=None,
2100            quantity=None, # defaults to elevation
2101            timestep=None,
2102            reduction=None,
2103            cellsize=10,
2104            number_of_decimal_places=None,
2105            NODATA_value=-9999,
2106            easting_min=None,
2107            easting_max=None,
2108            northing_min=None,
2109            northing_max=None,
2110            verbose=False,
2111            origin=None,
2112            datum='WGS84',
2113            format='ers',
2114            block_size=None):
    """Read SWW file and convert to Digital Elevation model format
2116    (.asc or .ers)
2117
2118    Example (ASC):
2119    ncols         3121
2120    nrows         1800
2121    xllcorner     722000
2122    yllcorner     5893000
2123    cellsize      25
2124    NODATA_value  -9999
2125    138.3698 137.4194 136.5062 135.5558 ..........
2126
2127    The number of decimal places can be specified by the user to save
2128    on disk space requirements by specifying in the call to sww2dem.
2129
2130    Also write accompanying file with same basename_in but extension .prj
2131    used to fix the UTM zone, datum, false northings and eastings.
2132
2133    The prj format is assumed to be as
2134
2135    Projection    UTM
2136    Zone          56
2137    Datum         WGS84
2138    Zunits        NO
2139    Units         METERS
2140    Spheroid      WGS84
2141    Xshift        0.0000000000
2142    Yshift        10000000.0000000000
2143    Parameters
2144
2145
2146    The parameter quantity must be the name of an existing quantity or
2147    an expression involving existing quantities. The default is
2148    'elevation'. Quantity is not a list of quantities.
2149
    If timestep (an index) is given, output quantity at that timestep.

    If reduction is given, use that to reduce quantity over all timesteps.

    datum is the datum written to the output georeference (default 'WGS84').

    format can be either 'asc' or 'ers'.
    block_size sets the number of slices along the non-time axis to
    process in one block.
2159    """
2160
2161    import sys
2162
2163    from anuga.utilities.polygon import inside_polygon, outside_polygon, \
2164         separate_points_by_polygon
2165    from anuga.abstract_2d_finite_volumes.util import \
2166         apply_expression_to_dictionary
2167
2168    msg = 'Format must be either asc or ers'
2169    assert format.lower() in ['asc', 'ers'], msg
2170
2171    false_easting = 500000
2172    false_northing = 10000000
2173
2174    if quantity is None:
2175        quantity = 'elevation'
2176
2177    if reduction is None:
2178        reduction = max
2179
2180    if basename_out is None:
2181        basename_out = basename_in + '_%s' % quantity
2182
2183    if quantity_formula.has_key(quantity):
2184        quantity = quantity_formula[quantity]
2185
2186    if number_of_decimal_places is None:
2187        number_of_decimal_places = 3
2188
2189    if block_size is None:
2190        block_size = DEFAULT_BLOCK_SIZE
2191
2192    # Read SWW file
2193    swwfile = basename_in + '.sww'
2194    demfile = basename_out + '.' + format
    # Note the use of a .ers extension is optional (write_ermapper_grid will
    # deal with either option).
2197
2198    # Read sww file
2199    if verbose:
2200        print 'Reading from %s' % swwfile
2201        print 'Output directory is %s' % basename_out
2202
2203    from Scientific.IO.NetCDF import NetCDFFile
2204    fid = NetCDFFile(swwfile)
2205
2206    #Get extent and reference
2207    x = fid.variables['x'][:]
2208    y = fid.variables['y'][:]
2209    volumes = fid.variables['volumes'][:]
2210    if timestep is not None:
2211        times = fid.variables['time'][timestep]
2212    else:
2213        times = fid.variables['time'][:]
2214
2215    number_of_timesteps = fid.dimensions['number_of_timesteps']
2216    number_of_points = fid.dimensions['number_of_points']
2217
2218    if origin is None:
2219        # Get geo_reference
2220        # sww files don't have to have a geo_ref
2221        try:
2222            geo_reference = Geo_reference(NetCDFObject=fid)
2223        except AttributeError, e:
2224            geo_reference = Geo_reference() # Default georef object
2225
2226        xllcorner = geo_reference.get_xllcorner()
2227        yllcorner = geo_reference.get_yllcorner()
2228        zone = geo_reference.get_zone()
2229    else:
2230        zone = origin[0]
2231        xllcorner = origin[1]
2232        yllcorner = origin[2]
2233
2234    # FIXME: Refactor using code from Interpolation_function.statistics
2235    # (in interpolate.py)
2236    # Something like print swwstats(swwname)
2237    if verbose:
2238        print '------------------------------------------------'
2239        print 'Statistics of SWW file:'
2240        print '  Name: %s' %swwfile
2241        print '  Reference:'
2242        print '    Lower left corner: [%f, %f]'\
2243              %(xllcorner, yllcorner)
2244        if timestep is not None:
2245            print '    Time: %f' %(times)
2246        else:
2247            print '    Start time: %f' %fid.starttime[0]
2248        print '  Extent:'
2249        print '    x [m] in [%f, %f], len(x) == %d'\
2250              %(num.min(x), num.max(x), len(x.flat))
2251        print '    y [m] in [%f, %f], len(y) == %d'\
2252              %(num.min(y), num.max(y), len(y.flat))
2253        if timestep is not None:
2254            print '    t [s] = %f, len(t) == %d' %(times, 1)
2255        else:
2256            print '    t [s] in [%f, %f], len(t) == %d'\
2257              %(min(times), max(times), len(times))
2258        print '  Quantities [SI units]:'
2259        # Comment out for reduced memory consumption
2260        for name in ['stage', 'xmomentum', 'ymomentum']:
2261            q = fid.variables[name][:].flatten()
2262            if timestep is not None:
2263                q = q[timestep*len(x):(timestep+1)*len(x)]
2264            if verbose: print '    %s in [%f, %f]' %(name, min(q), max(q))
2265        for name in ['elevation']:
2266            q = fid.variables[name][:].flatten()
2267            if verbose: print '    %s in [%f, %f]' %(name, min(q), max(q))
2268
2269    # Get the variables in the supplied expression.
2270    # This may throw a SyntaxError exception.
2271    var_list = get_vars_in_expression(quantity)
2272
2273    # Check that we have the required variables in the SWW file.
2274    missing_vars = []
2275    for name in var_list:
2276        try:
2277            _ = fid.variables[name]
2278        except:
2279            missing_vars.append(name)
2280    if missing_vars:
        msg = ("In expression '%s', variables %s are not in the SWW file '%s'"
               % (quantity, missing_vars, swwfile))
2283        raise Exception, msg
2284
2285    # Create result array and start filling, block by block.
2286    result = num.zeros(number_of_points, num.float)
2287
2288    for start_slice in xrange(0, number_of_points, block_size):
2289        # limit slice size to array end if at last block
2290        end_slice = min(start_slice + block_size, number_of_points)
2291       
2292        # get slices of all required variables
2293        q_dict = {}
2294        for name in var_list:
2295            # check if variable has time axis
2296            if len(fid.variables[name].shape) == 2:
2297                q_dict[name] = fid.variables[name][:,start_slice:end_slice]
2298            else:       # no time axis
2299                q_dict[name] = fid.variables[name][start_slice:end_slice]
2300
2301        # Evaluate expression with quantities found in SWW file
2302        res = apply_expression_to_dictionary(quantity, q_dict)
2303
2304        if len(res.shape) == 2:
2305            new_res = num.zeros(res.shape[1], num.float)
2306            for k in xrange(res.shape[1]):
2307                new_res[k] = reduction(res[:,k])
2308            res = new_res
2309
2310        result[start_slice:end_slice] = res
2311                                   
2312    #Post condition: Now q has dimension: number_of_points
2313    assert len(result.shape) == 1
2314    assert result.shape[0] == number_of_points
2315
2316    if verbose:
2317        print 'Processed values for %s are in [%f, %f]' % \
2318              (quantity, min(result), max(result))
2319
2320    #Create grid and update xll/yll corner and x,y
2321    #Relative extent
2322    if easting_min is None:
2323        xmin = min(x)
2324    else:
2325        xmin = easting_min - xllcorner
2326
2327    if easting_max is None:
2328        xmax = max(x)
2329    else:
2330        xmax = easting_max - xllcorner
2331
2332    if northing_min is None:
2333        ymin = min(y)
2334    else:
2335        ymin = northing_min - yllcorner
2336
2337    if northing_max is None:
2338        ymax = max(y)
2339    else:
2340        ymax = northing_max - yllcorner
2341
2342    msg = 'xmax must be greater than or equal to xmin.\n'
2343    msg += 'I got xmin = %f, xmax = %f' %(xmin, xmax)
2344    assert xmax >= xmin, msg
2345
    msg = 'ymax must be greater than or equal to ymin.\n'
2347    msg += 'I got ymin = %f, ymax = %f' %(ymin, ymax)
2348    assert ymax >= ymin, msg
2349
2350    if verbose: print 'Creating grid'
2351    ncols = int((xmax-xmin)/cellsize) + 1
2352    nrows = int((ymax-ymin)/cellsize) + 1
2353
2354    #New absolute reference and coordinates
2355    newxllcorner = xmin + xllcorner
2356    newyllcorner = ymin + yllcorner
2357
2358    x = x + xllcorner - newxllcorner
2359    y = y + yllcorner - newyllcorner
2360
2361    vertex_points = num.concatenate ((x[:,num.newaxis], y[:,num.newaxis]), axis=1)
2362    assert len(vertex_points.shape) == 2
2363
2364    grid_points = num.zeros ((ncols*nrows, 2), num.float)
2365
2366    for i in xrange(nrows):
2367        if format.lower() == 'asc':
2368            yg = i * cellsize
2369        else:
            # this will flip the order of the y values for ers
            yg = (nrows-i) * cellsize
2372
2373        for j in xrange(ncols):
2374            xg = j * cellsize
2375            k = i*ncols + j
2376
2377            grid_points[k, 0] = xg
2378            grid_points[k, 1] = yg
2379
2380    #Interpolate
2381    from anuga.fit_interpolate.interpolate import Interpolate
2382
2383    # Remove loners from vertex_points, volumes here
2384    vertex_points, volumes = remove_lone_verts(vertex_points, volumes)
2387    interp = Interpolate(vertex_points, volumes, verbose = verbose)
2388
2389    #Interpolate using quantity values
2390    if verbose: print 'Interpolating'
2391    grid_values = interp.interpolate(result, grid_points).flatten()
2392
2393    if verbose:
2394        print 'Interpolated values are in [%f, %f]' %(num.min(grid_values),
2395                                                      num.max(grid_values))
2396
2397    #Assign NODATA_value to all points outside bounding polygon (from interpolation mesh)
2398    P = interp.mesh.get_boundary_polygon()
2399    outside_indices = outside_polygon(grid_points, P, closed=True)
2400
2401    for i in outside_indices:
2402        grid_values[i] = NODATA_value
2403
2404    if format.lower() == 'ers':
2405        # setup ERS header information
2406        grid_values = num.reshape(grid_values, (nrows, ncols))
2407        header = {}
2408        header['datum'] = '"' + datum + '"'
2409        # FIXME The use of hardwired UTM and zone number needs to be made optional
2410        # FIXME Also need an automatic test for coordinate type (i.e. EN or LL)
2411        header['projection'] = '"UTM-' + str(zone) + '"'
2412        header['coordinatetype'] = 'EN'
2413        if header['coordinatetype'] == 'LL':
2414            header['longitude'] = str(newxllcorner)
2415            header['latitude'] = str(newyllcorner)
2416        elif header['coordinatetype'] == 'EN':
2417            header['eastings'] = str(newxllcorner)
2418            header['northings'] = str(newyllcorner)
2419        header['nullcellvalue'] = str(NODATA_value)
2420        header['xdimension'] = str(cellsize)
2421        header['ydimension'] = str(cellsize)
2422        header['value'] = '"' + quantity + '"'
2423        #header['celltype'] = 'IEEE8ByteReal'  #FIXME: Breaks unit test
2424
2425        #Write
2426        if verbose: print 'Writing %s' %demfile
2427
2428        import ermapper_grids
2429
2430        ermapper_grids.write_ermapper_grid(demfile, grid_values, header)
2431
2432        fid.close()
2433    else:
2434        #Write to Ascii format
2435        #Write prj file
2436        prjfile = basename_out + '.prj'
2437
2438        if verbose: print 'Writing %s' %prjfile
2439        prjid = open(prjfile, 'w')
2440        prjid.write('Projection    %s\n' %'UTM')
2441        prjid.write('Zone          %d\n' %zone)
2442        prjid.write('Datum         %s\n' %datum)
2443        prjid.write('Zunits        NO\n')
2444        prjid.write('Units         METERS\n')
2445        prjid.write('Spheroid      %s\n' %datum)
2446        prjid.write('Xshift        %d\n' %false_easting)
2447        prjid.write('Yshift        %d\n' %false_northing)
2448        prjid.write('Parameters\n')
2449        prjid.close()
2450
2451        if verbose: print 'Writing %s' %demfile
2452
2453        ascid = open(demfile, 'w')
2454
2455        ascid.write('ncols         %d\n' %ncols)
2456        ascid.write('nrows         %d\n' %nrows)
2457        ascid.write('xllcorner     %d\n' %newxllcorner)
2458        ascid.write('yllcorner     %d\n' %newyllcorner)
2459        ascid.write('cellsize      %f\n' %cellsize)
2460        ascid.write('NODATA_value  %d\n' %NODATA_value)
2461
2462        #Get bounding polygon from mesh
2463        #P = interp.mesh.get_boundary_polygon()
2464        #inside_indices = inside_polygon(grid_points, P)
2465        for i in range(nrows):
2466            if verbose and i % ((nrows+10)/10) == 0:
2467                print 'Doing row %d of %d' %(i, nrows)
2468
2469            base_index = (nrows-i-1)*ncols
2470
2471            slice = grid_values[base_index:base_index+ncols]
2472            #s = array2string(slice, max_line_width=sys.maxint)
2473            s = num.array2string(slice, max_line_width=sys.maxint,
2474                                 precision=number_of_decimal_places)
2475            ascid.write(s[1:-1] + '\n')
2476
2477        #Close
2478        ascid.close()
2479        fid.close()
2480
    return basename_out
2482
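# Example (sketch; assumes 'scenario.sww' exists):
#
#     sww2dem('scenario', basename_out='scenario_depth',
#             quantity='stage - elevation',    # water depth
#             reduction=max,                   # max over all timesteps
#             cellsize=25,
#             format='asc',
#             verbose=True)
#
# This would write 'scenario_depth.asc' (and 'scenario_depth.prj') holding
# the maximum water depth on a 25 m grid.
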
2483################################################################################
# Obsolete functions - maintained for backwards compatibility
2485################################################################################
2486
2487##
2488# @brief
2489# @param basename_in
2490# @param basename_out
2491# @param quantity
2492# @param timestep
2493# @param reduction
# @param cellsize
# @param number_of_decimal_places
2495# @param verbose
2496# @param origin
2497# @note OBSOLETE - use sww2dem() instead.
def sww2asc(basename_in, basename_out = None,
            quantity = None,
            timestep = None,
            reduction = None,
            cellsize = 10,
            number_of_decimal_places = None,
            verbose = False,
            origin = None):
2505    print 'sww2asc will soon be obsoleted - please use sww2dem'
2506    sww2dem(basename_in,
2507            basename_out = basename_out,
2508            quantity = quantity,
2509            timestep = timestep,
2510            reduction = reduction,
2511            cellsize = cellsize,
2512            number_of_decimal_places = number_of_decimal_places,
2513            verbose = verbose,
2514            origin = origin,
            datum = 'WGS84',
            format = 'asc')
2517
2518
2519##
2520# @brief
2521# @param basename_in
2522# @param basename_out
2523# @param quantity
2524# @param timestep
2525# @param reduction
# @param cellsize
# @param number_of_decimal_places
2527# @param verbose
2528# @param origin
2529# @param datum
2530# @note OBSOLETE - use sww2dem() instead.
def sww2ers(basename_in, basename_out=None,
            quantity=None,
            timestep=None,
            reduction=None,
            cellsize=10,
            number_of_decimal_places=None,
            verbose=False,
            origin=None,
            datum='WGS84'):
2539    print 'sww2ers will soon be obsoleted - please use sww2dem'
2540    sww2dem(basename_in,
2541            basename_out=basename_out,
2542            quantity=quantity,
2543            timestep=timestep,
2544            reduction=reduction,
2545            cellsize=cellsize,
2546            number_of_decimal_places=number_of_decimal_places,
2547            verbose=verbose,
2548            origin=origin,
2549            datum=datum,
2550            format='ers')
2551
2552################################################################################
2553# End of obsolete functions
2554################################################################################
2555
2556
2557##
2558# @brief Convert SWW file to PTS file (at selected points).
2559# @param basename_in Stem name of input SWW file.
2560# @param basename_out Stem name of output file.
2561# @param data_points If given, points where quantity is to be computed.
2562# @param quantity Name (or expression) of existing quantity(s) (def: elevation).
2563# @param timestep If given, output quantity at that timestep.
# @param reduction If given, reduce quantity over all timesteps with this function.
2565# @param NODATA_value The NODATA value (default -9999).
2566# @param verbose True if this function is to be verbose.
# @param origin Tuple (zone, easting, northing) overriding the SWW georeference.
2568def sww2pts(basename_in, basename_out=None,
2569            data_points=None,
2570            quantity=None,
2571            timestep=None,
2572            reduction=None,
2573            NODATA_value=-9999,
2574            verbose=False,
2575            origin=None):
2576    """Read SWW file and convert to interpolated values at selected points
2577
2578    The parameter 'quantity' must be the name of an existing quantity or
2579    an expression involving existing quantities. The default is 'elevation'.
2580
2581    if timestep (an index) is given, output quantity at that timestep.
2582
2583    if reduction is given use that to reduce quantity over all timesteps.
2584
2585    data_points (Nx2 array) give locations of points where quantity is to
2586    be computed.
2587    """
2588
2589    import sys
2590    from anuga.utilities.polygon import inside_polygon, outside_polygon, \
2591             separate_points_by_polygon
2592    from anuga.abstract_2d_finite_volumes.util import \
2593             apply_expression_to_dictionary
2594    from anuga.geospatial_data.geospatial_data import Geospatial_data
2595
2596    if quantity is None:
2597        quantity = 'elevation'
2598
2599    if reduction is None:
2600        reduction = max
2601
2602    if basename_out is None:
2603        basename_out = basename_in + '_%s' % quantity
2604
2605    swwfile = basename_in + '.sww'
2606    ptsfile = basename_out + '.pts'
2607
2608    # Read sww file
2609    if verbose: print 'Reading from %s' % swwfile
2610    from Scientific.IO.NetCDF import NetCDFFile
2611    fid = NetCDFFile(swwfile)
2612
2613    # Get extent and reference
2614    x = fid.variables['x'][:]
2615    y = fid.variables['y'][:]
2616    volumes = fid.variables['volumes'][:]
2617
2618    number_of_timesteps = fid.dimensions['number_of_timesteps']
2619    number_of_points = fid.dimensions['number_of_points']
2620    if origin is None:
2621        # Get geo_reference
2622        # sww files don't have to have a geo_ref
2623        try:
2624            geo_reference = Geo_reference(NetCDFObject=fid)
2625        except AttributeError, e:
2626            geo_reference = Geo_reference() # Default georef object
2627
2628        xllcorner = geo_reference.get_xllcorner()
2629        yllcorner = geo_reference.get_yllcorner()
2630        zone = geo_reference.get_zone()
2631    else:
2632        zone = origin[0]
2633        xllcorner = origin[1]
2634        yllcorner = origin[2]
2635
2636    # FIXME: Refactor using code from file_function.statistics
2637    # Something like print swwstats(swwname)
2638    if verbose:
2639        x = fid.variables['x'][:]
2640        y = fid.variables['y'][:]
2641        times = fid.variables['time'][:]
2642        print '------------------------------------------------'
2643        print 'Statistics of SWW file:'
2644        print '  Name: %s' % swwfile
2645        print '  Reference:'
2646        print '    Lower left corner: [%f, %f]' % (xllcorner, yllcorner)
2647        print '    Start time: %f' % fid.starttime[0]
2648        print '  Extent:'
2649        print '    x [m] in [%f, %f], len(x) == %d' \
2650              % (num.min(x), num.max(x), len(x.flat))
2651        print '    y [m] in [%f, %f], len(y) == %d' \
2652              % (num.min(y), num.max(y), len(y.flat))
2653        print '    t [s] in [%f, %f], len(t) == %d' \
2654              % (min(times), max(times), len(times))
2655        print '  Quantities [SI units]:'
2656        for name in ['stage', 'xmomentum', 'ymomentum', 'elevation']:
2657            q = fid.variables[name][:].flat
2658            print '    %s in [%f, %f]' % (name, min(q), max(q))
2659
2660    # Get quantity and reduce if applicable
2661    if verbose: print 'Processing quantity %s' % quantity
2662
2663    # Turn NetCDF objects into numeric arrays
2664    quantity_dict = {}
2665    for name in fid.variables.keys():
2666        quantity_dict[name] = fid.variables[name][:]
2667
2668    # Convert quantity expression to quantities found in sww file
2669    q = apply_expression_to_dictionary(quantity, quantity_dict)
2670
2671    if len(q.shape) == 2:
2672        # q has a time component and needs to be reduced along
2673        # the temporal dimension
2674        if verbose: print 'Reducing quantity %s' % quantity
2675
2676        q_reduced = num.zeros(number_of_points, num.float)
2677        for k in range(number_of_points):
2678            q_reduced[k] = reduction(q[:,k])
2679        q = q_reduced
2680
2681    # Post condition: Now q has dimension: number_of_points
2682    assert len(q.shape) == 1
2683    assert q.shape[0] == number_of_points
2684
2685    if verbose:
2686        print 'Processed values for %s are in [%f, %f]' \
2687              % (quantity, min(q), max(q))
2688
2689    # Create grid and update xll/yll corner and x,y
2690    vertex_points = num.concatenate((x[:, num.newaxis], y[:, num.newaxis]), axis=1)
2691    assert len(vertex_points.shape) == 2
2692
2693    # Interpolate
2694    from anuga.fit_interpolate.interpolate import Interpolate
2695    interp = Interpolate(vertex_points, volumes, verbose=verbose)
2696
2697    # Interpolate using quantity values
2698    if verbose: print 'Interpolating'
    interpolated_values = interp.interpolate(q, data_points).flatten()
2700
2701    if verbose:
2702        print ('Interpolated values are in [%f, %f]'
2703               % (num.min(interpolated_values), num.max(interpolated_values)))
2704
2705    # Assign NODATA_value to all points outside bounding polygon
2706    # (from interpolation mesh)
2707    P = interp.mesh.get_boundary_polygon()
2708    outside_indices = outside_polygon(data_points, P, closed=True)
2709
2710    for i in outside_indices:
2711        interpolated_values[i] = NODATA_value
2712
2713    # Store results
2714    G = Geospatial_data(data_points=data_points, attributes=interpolated_values)
2715
2716    G.export_points_file(ptsfile, absolute = True)
2717
2718    fid.close()
2719
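# Example (sketch; assumes 'scenario.sww' exists and that the points lie
# inside the mesh; note the points are passed straight to the interpolator,
# so they are assumed to be in the same coordinate frame as the sww x/y
# arrays):
#
#     gauge_points = [[1000.0, 2000.0], [1500.0, 2000.0]]
#     sww2pts('scenario', basename_out='scenario_depth',
#             data_points=gauge_points,
#             quantity='stage - elevation',
#             verbose=True)
#
# This would write the reduced (default: max over all timesteps) water depth
# at the two points to 'scenario_depth.pts'.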
2720
2721##
2722# @brief Convert ASC file to DEM file.
2723# @param basename_in Stem of input filename.
2724# @param basename_out Stem of output filename.
# @param use_cache True if the result is to be cached.
# @param verbose True if this function is to be verbose.
# @return The result of the (possibly cached) conversion.
2728# @note A PRJ file with same stem basename must exist and is used to fix the
2729#       UTM zone, datum, false northings and eastings.
2730def convert_dem_from_ascii2netcdf(basename_in, basename_out=None,
2731                                  use_cache=False,
2732                                  verbose=False):
2733    """Read Digital Elevation model from the following ASCII format (.asc)
2734
2735    Example:
2736    ncols         3121
2737    nrows         1800
2738    xllcorner     722000
2739    yllcorner     5893000
2740    cellsize      25
2741    NODATA_value  -9999
2742    138.3698 137.4194 136.5062 135.5558 ..........
2743
2744    Convert basename_in + '.asc' to NetCDF format (.dem)
2745    mimicking the ASCII format closely.
2746
2747    An accompanying file with same basename_in but extension .prj must exist
2748    and is used to fix the UTM zone, datum, false northings and eastings.
2749
2750    The prj format is assumed to be as
2751
2752    Projection    UTM
2753    Zone          56
2754    Datum         WGS84
2755    Zunits        NO
2756    Units         METERS
2757    Spheroid      WGS84
2758    Xshift        0.0000000000
2759    Yshift        10000000.0000000000
2760    Parameters
2761    """
2762
2763    kwargs = {'basename_out': basename_out, 'verbose': verbose}
2764
2765    if use_cache is True:
2766        from caching import cache
2767        result = cache(_convert_dem_from_ascii2netcdf, basename_in, kwargs,
2768                       dependencies=[basename_in + '.asc',
2769                                     basename_in + '.prj'],
2770                       verbose=verbose)
2771
2772    else:
2773        result = apply(_convert_dem_from_ascii2netcdf, [basename_in], kwargs)
2774
2775    return result
2776
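# Example (sketch; assumes 'topo.asc' and its companion 'topo.prj' exist):
#
#     convert_dem_from_ascii2netcdf('topo', use_cache=True, verbose=True)
#
# This would write 'topo.dem' in NetCDF format; dem2pts can then convert
# that file further to a .pts file.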
2777
2778##
2779# @brief Convert an ASC file to a DEM file.
2780# @param basename_in Stem of input filename.
2781# @param basename_out Stem of output filename.
2782# @param verbose True if this function is to be verbose.
2783def _convert_dem_from_ascii2netcdf(basename_in, basename_out = None,
2784                                  verbose = False):
2785    """Read Digital Elevation model from the following ASCII format (.asc)
2786
2787    Internal function. See public function convert_dem_from_ascii2netcdf
2788    for details.
2789    """
2790
2791    import os
2792    from Scientific.IO.NetCDF import NetCDFFile
2793
2794    root = basename_in
2795
2796    # Read Meta data
2797    if verbose: print 'Reading METADATA from %s' % root + '.prj'
2798
2799    metadatafile = open(root + '.prj')
2800    metalines = metadatafile.readlines()
2801    metadatafile.close()
2802
2803    L = metalines[0].strip().split()
2804    assert L[0].strip().lower() == 'projection'
2805    projection = L[1].strip()                   #TEXT
2806
2807    L = metalines[1].strip().split()
2808    assert L[0].strip().lower() == 'zone'
2809    zone = int(L[1].strip())
2810
2811    L = metalines[2].strip().split()
2812    assert L[0].strip().lower() == 'datum'
2813    datum = L[1].strip()                        #TEXT
2814
2815    L = metalines[3].strip().split()
2816    assert L[0].strip().lower() == 'zunits'     #IGNORE
2817    zunits = L[1].strip()                       #TEXT
2818
2819    L = metalines[4].strip().split()
2820    assert L[0].strip().lower() == 'units'
2821    units = L[1].strip()                        #TEXT
2822
2823    L = metalines[5].strip().split()
2824    assert L[0].strip().lower() == 'spheroid'   #IGNORE
2825    spheroid = L[1].strip()                     #TEXT
2826
2827    L = metalines[6].strip().split()
2828    assert L[0].strip().lower() == 'xshift'
2829    false_easting = float(L[1].strip())
2830
2831    L = metalines[7].strip().split()
2832    assert L[0].strip().lower() == 'yshift'
2833    false_northing = float(L[1].strip())
2834
2835    #Read DEM data
2836    datafile = open(basename_in + '.asc')
2837
2838    if verbose: print 'Reading DEM from %s' % basename_in + '.asc'
2839
2840    lines = datafile.readlines()
2841    datafile.close()
2842
    if verbose: print 'Got', len(lines), 'lines'
2844
2845    ncols = int(lines[0].split()[1].strip())
2846    nrows = int(lines[1].split()[1].strip())
2847
2848    # Do cellsize (line 4) before line 2 and 3
2849    cellsize = float(lines[4].split()[1].strip())
2850
2851    # Checks suggested by Joaquim Luis
2852    # Our internal representation of xllcorner
2853    # and yllcorner is non-standard.
2854    xref = lines[2].split()
2855    if xref[0].strip() == 'xllcorner':
2856        xllcorner = float(xref[1].strip()) # + 0.5*cellsize # Correct offset
2857    elif xref[0].strip() == 'xllcenter':
2858        xllcorner = float(xref[1].strip())
2859    else:
2860        msg = 'Unknown keyword: %s' % xref[0].strip()
2861        raise Exception, msg
2862
2863    yref = lines[3].split()
2864    if yref[0].strip() == 'yllcorner':
2865        yllcorner = float(yref[1].strip()) # + 0.5*cellsize # Correct offset
2866    elif yref[0].strip() == 'yllcenter':
2867        yllcorner = float(yref[1].strip())
2868    else:
2869        msg = 'Unknown keyword: %s' % yref[0].strip()
2870        raise Exception, msg
2871
2872    NODATA_value = int(lines[5].split()[1].strip())
2873
2874    assert len(lines) == nrows + 6
2875
    if basename_out is None:
2877        netcdfname = root + '.dem'
2878    else:
2879        netcdfname = basename_out + '.dem'
2880
2881    if verbose: print 'Store to NetCDF file %s' % netcdfname
2882
2883    # NetCDF file definition
2884    fid = NetCDFFile(netcdfname, netcdf_mode_w)
2885
2886    #Create new file
2887    fid.institution = 'Geoscience Australia'
2888    fid.description = 'NetCDF DEM format for compact and portable storage ' \
2889                      'of spatial point data'
2890
2891    fid.ncols = ncols
2892    fid.nrows = nrows
2893    fid.xllcorner = xllcorner
2894    fid.yllcorner = yllcorner
2895    fid.cellsize = cellsize
2896    fid.NODATA_value = NODATA_value
2897
2898    fid.zone = zone
2899    fid.false_easting = false_easting
2900    fid.false_northing = false_northing
2901    fid.projection = projection
2902    fid.datum = datum
2903    fid.units = units
2904
2905    # dimension definitions
2906    fid.createDimension('number_of_rows', nrows)
2907    fid.createDimension('number_of_columns', ncols)
2908
2909    # variable definitions
2910    fid.createVariable('elevation', netcdf_float, ('number_of_rows',
2911                                                   'number_of_columns'))
2912
2913    # Get handles to the variables
2914    elevation = fid.variables['elevation']
2915
2916    #Store data
2917    n = len(lines[6:])
2918    for i, line in enumerate(lines[6:]):
2919        fields = line.split()
2920        if verbose and i % ((n+10)/10) == 0:
2921            print 'Processing row %d of %d' % (i, nrows)
2922        elevation[i, :] = num.array([float(x) for x in fields])
2923
2924    fid.close()
2925
2926
2927##
2928# @brief Convert 'ferret' file to SWW file.
2929# @param basename_in Stem of input filename.
2930# @param basename_out Stem of output filename.
2931# @param verbose True if this function is to be verbose.
# @param minlat Minimum latitude of clipping region (decimal degrees).
# @param maxlat Maximum latitude of clipping region (decimal degrees).
# @param minlon Minimum longitude of clipping region (decimal degrees).
# @param maxlon Maximum longitude of clipping region (decimal degrees).
# @param mint Minimum time of the time window to convert.
# @param maxt Maximum time of the time window to convert.
# @param mean_stage Constant added to the stage (default 0).
# @param origin Tuple (zone, easting, northing) of geo-referenced UTM coordinates.
# @param zscale Scale factor applied to the amplitudes (default 1).
# @param fail_on_NaN True if missing values should raise an exception.
# @param NaN_filler Value used to replace missing values when not failing.
# @param elevation If given, constant elevation to use instead of the file data.
# @param inverted_bathymetry True if the input bathymetry should be negated.
2945def ferret2sww(basename_in, basename_out=None,
2946               verbose=False,
2947               minlat=None, maxlat=None,
2948               minlon=None, maxlon=None,
2949               mint=None, maxt=None, mean_stage=0,
2950               origin=None, zscale=1,
2951               fail_on_NaN=True,
2952               NaN_filler=0,
2953               elevation=None,
2954               inverted_bathymetry=True
               ):   # FIXME: Bathymetry should be obtained from MOST somehow.
                    # Alternatively from elsewhere or, as a last resort,
                    # specified here. The value of -100 will work for the
                    # Wollongong tsunami scenario but is very hacky.
2963    """Convert MOST and 'Ferret' NetCDF format for wave propagation to
2964    sww format native to abstract_2d_finite_volumes.
2965
2966    Specify only basename_in and read files of the form
2967    basefilename_ha.nc, basefilename_ua.nc, basefilename_va.nc containing
2968    relative height, x-velocity and y-velocity, respectively.
2969
2970    Also convert latitude and longitude to UTM. All coordinates are
2971    assumed to be given in the GDA94 datum.
2972
    min's and max's: If omitted, the full extent is used.
    To include a value, min may equal it, while max must exceed it.
    Lat and lon are assumed to be in decimal degrees.
2976
2977    origin is a 3-tuple with geo referenced
2978    UTM coordinates (zone, easting, northing)
2979
2980    nc format has values organised as HA[TIME, LATITUDE, LONGITUDE]
2981    which means that longitude is the fastest
2982    varying dimension (row major order, so to speak)
2983
2984    ferret2sww uses grid points as vertices in a triangular grid
2985    counting vertices from lower left corner upwards, then right
2986    """
2987
2988    import os
2989    from Scientific.IO.NetCDF import NetCDFFile
2990
2991    precision = num.float
2992
2993    msg = 'Must use latitudes and longitudes for minlat, maxlon etc'
2994
    if minlat is not None:
        assert -90 < minlat < 90, msg
    if maxlat is not None:
        assert -90 < maxlat < 90, msg
        if minlat is not None:
            assert maxlat > minlat
    if minlon is not None:
        assert -180 < minlon < 180, msg
    if maxlon is not None:
        assert -180 < maxlon < 180, msg
        if minlon is not None:
            assert maxlon > minlon
3007
3008    # Get NetCDF data
3009    if verbose: print 'Reading files %s_*.nc' % basename_in
3010
3011    file_h = NetCDFFile(basename_in + '_ha.nc', netcdf_mode_r) # Wave amplitude (cm)
3012    file_u = NetCDFFile(basename_in + '_ua.nc', netcdf_mode_r) # Velocity (x) (cm/s)
3013    file_v = NetCDFFile(basename_in + '_va.nc', netcdf_mode_r) # Velocity (y) (cm/s)
3014    file_e = NetCDFFile(basename_in + '_e.nc', netcdf_mode_r)  # Elevation (z) (m)
3015
3016    if basename_out is None:
3017        swwname = basename_in + '.sww'
3018    else:
3019        swwname = basename_out + '.sww'
3020
3021    # Get dimensions of file_h
3022    for dimension in file_h.dimensions.keys():
3023        if dimension[:3] == 'LON':
3024            dim_h_longitude = dimension
3025        if dimension[:3] == 'LAT':
3026            dim_h_latitude = dimension
3027        if dimension[:4] == 'TIME':
3028            dim_h_time = dimension
3029
3030    times = file_h.variables[dim_h_time]
3031    latitudes = file_h.variables[dim_h_latitude]
3032    longitudes = file_h.variables[dim_h_longitude]
3033
3038    # get dimensions for file_e
3039    for dimension in file_e.dimensions.keys():
3040        if dimension[:3] == 'LON':
3041            dim_e_longitude = dimension
3042        if dimension[:3] == 'LAT':
3043            dim_e_latitude = dimension
3044
3045    # get dimensions for file_u
3046    for dimension in file_u.dimensions.keys():
3047        if dimension[:3] == 'LON':
3048            dim_u_longitude = dimension
3049        if dimension[:3] == 'LAT':
3050            dim_u_latitude = dimension
3051        if dimension[:4] == 'TIME':
3052            dim_u_time = dimension
3053
3054    # get dimensions for file_v
3055    for dimension in file_v.dimensions.keys():
3056        if dimension[:3] == 'LON':
3057            dim_v_longitude = dimension
3058        if dimension[:3] == 'LAT':
3059            dim_v_latitude = dimension
3060        if dimension[:4] == 'TIME':
3061            dim_v_time = dimension
3062
    # Precision used by MOST for lat/lon is 4 or 5 decimals
3064    e_lat = num.around(file_e.variables[dim_e_latitude][:], 5)
3065    e_lon = num.around(file_e.variables[dim_e_longitude][:], 5)
3066
3067    # Check that files are compatible
3068    assert num.allclose(latitudes, file_u.variables[dim_u_latitude])
3069    assert num.allclose(latitudes, file_v.variables[dim_v_latitude])
3070    assert num.allclose(latitudes, e_lat)
3071
3072    assert num.allclose(longitudes, file_u.variables[dim_u_longitude])
3073    assert num.allclose(longitudes, file_v.variables[dim_v_longitude])
3074    assert num.allclose(longitudes, e_lon)
3075
3076    if mint is None:
3077        jmin = 0
3078        mint = times[0]
3079    else:
3080        jmin = num.searchsorted(times, mint)
3081
3082    if maxt is None:
3083        jmax = len(times)
3084        maxt = times[-1]
3085    else:
3086        jmax = num.searchsorted(times, maxt)
3087
3088    kmin, kmax, lmin, lmax = _get_min_max_indexes(latitudes[:],
3089                                                  longitudes[:],
3090                                                  minlat, maxlat,
3091                                                  minlon, maxlon)
3092
3093
3094    times = times[jmin:jmax]
3095    latitudes = latitudes[kmin:kmax]
3096    longitudes = longitudes[lmin:lmax]
3097
3098    if verbose: print 'cropping'
3099
3100    zname = 'ELEVATION'
3101
3102    amplitudes = file_h.variables['HA'][jmin:jmax, kmin:kmax, lmin:lmax]
3103    uspeed = file_u.variables['UA'][jmin:jmax, kmin:kmax, lmin:lmax] #Lon
3104    vspeed = file_v.variables['VA'][jmin:jmax, kmin:kmax, lmin:lmax] #Lat
3105    elevations = file_e.variables[zname][kmin:kmax, lmin:lmax]
3106
3107    #    if latitudes2[0]==latitudes[0] and latitudes2[-1]==latitudes[-1]:
3108    #        elevations = file_e.variables['ELEVATION'][kmin:kmax, lmin:lmax]
3109    #    elif latitudes2[0]==latitudes[-1] and latitudes2[-1]==latitudes[0]:
3110    #        from numpy import asarray
3111    #        elevations=elevations.tolist()
3112    #        elevations.reverse()
3113    #        elevations=asarray(elevations)
3114    #    else:
3115    #        from numpy import asarray
3116    #        elevations=elevations.tolist()
3117    #        elevations.reverse()
3118    #        elevations=asarray(elevations)
3119    #        'print hmmm'
3120
3121    #Get missing values
3122    nan_ha = file_h.variables['HA'].missing_value[0]
3123    nan_ua = file_u.variables['UA'].missing_value[0]
3124    nan_va = file_v.variables['VA'].missing_value[0]
3125    if hasattr(file_e.variables[zname],'missing_value'):
3126        nan_e  = file_e.variables[zname].missing_value[0]
3127    else:
3128        nan_e = None
3129
3130    #Cleanup
3131    missing = (amplitudes == nan_ha)
3132    if num.sometrue (missing):
3133        if fail_on_NaN:
3134            msg = 'NetCDFFile %s contains missing values' \
3135                  % (basename_in + '_ha.nc')
3136            raise DataMissingValuesError, msg
3137        else:
3138            amplitudes = amplitudes*(missing==0) + missing*NaN_filler
3139
3140    missing = (uspeed == nan_ua)
3141    if num.sometrue (missing):
3142        if fail_on_NaN:
3143            msg = 'NetCDFFile %s contains missing values' \
3144                  % (basename_in + '_ua.nc')
3145            raise DataMissingValuesError, msg
3146        else:
3147            uspeed = uspeed*(missing==0) + missing*NaN_filler
3148
3149    missing = (vspeed == nan_va)
3150    if num.sometrue (missing):
3151        if fail_on_NaN:
3152            msg = 'NetCDFFile %s contains missing values' \
3153                  % (basename_in + '_va.nc')
3154            raise DataMissingValuesError, msg
3155        else:
3156            vspeed = vspeed*(missing==0) + missing*NaN_filler
3157
3158    missing = (elevations == nan_e)
3159    if num.sometrue (missing):
3160        if fail_on_NaN:
3161            msg = 'NetCDFFile %s contains missing values' \
3162                  % (basename_in + '_e.nc')
3163            raise DataMissingValuesError, msg
3164        else:
3165            elevations = elevations*(missing==0) + missing*NaN_filler
3166
3167    number_of_times = times.shape[0]
3168    number_of_latitudes = latitudes.shape[0]
3169    number_of_longitudes = longitudes.shape[0]
3170
3171    assert amplitudes.shape[0] == number_of_times
3172    assert amplitudes.shape[1] == number_of_latitudes
3173    assert amplitudes.shape[2] == number_of_longitudes
3174
3175    if verbose:
3176        print '------------------------------------------------'
3177        print 'Statistics:'
3178        print '  Extent (lat/lon):'
3179        print '    lat in [%f, %f], len(lat) == %d' \
3180              % (num.min(latitudes), num.max(latitudes), len(latitudes.flat))
3181        print '    lon in [%f, %f], len(lon) == %d' \
3182              % (num.min(longitudes), num.max(longitudes),
3183                 len(longitudes.flat))
3184        print '    t in [%f, %f], len(t) == %d' \
3185              % (num.min(times), num.max(times), len(times.flat))
3186
3187        q = amplitudes.flatten()
3188        name = 'Amplitudes (ha) [cm]'
3189        print '    %s in [%f, %f]' % (name, min(q), max(q))
3190
3191        q = uspeed.flatten()
3192        name = 'Speeds (ua) [cm/s]'
3193        print '    %s in [%f, %f]' % (name, min(q), max(q))
3194
3195        q = vspeed.flatten()
3196        name = 'Speeds (va) [cm/s]'
3197        print '    %s in [%f, %f]' % (name, min(q), max(q))
3198
3199        q = elevations.flatten()
3200        name = 'Elevations (e) [m]'
3201        print '    %s in [%f, %f]' % (name, min(q), max(q))
3202
3203    # print number_of_latitudes, number_of_longitudes
3204    number_of_points = number_of_latitudes * number_of_longitudes
3205    number_of_volumes = (number_of_latitudes-1) * (number_of_longitudes-1) * 2
3206
3207    file_h.close()
3208    file_u.close()
3209    file_v.close()
3210    file_e.close()
3211
3212    # NetCDF file definition
3213    outfile = NetCDFFile(swwname, netcdf_mode_w)
3214
3215    description = 'Converted from Ferret files: %s, %s, %s, %s' \
3216                  % (basename_in + '_ha.nc',
3217                     basename_in + '_ua.nc',
3218                     basename_in + '_va.nc',
3219                     basename_in + '_e.nc')
3220
3221    # Create new file
3222    starttime = times[0]
3223
3224    sww = Write_sww()
3225    sww.store_header(outfile, times, number_of_volumes,
3226                     number_of_points, description=description,
3227                     verbose=verbose, sww_precision=netcdf_float)
3228
3229    # Store
3230    from anuga.coordinate_transforms.redfearn import redfearn
3231    x = num.zeros(number_of_points, num.float)  #Easting
3232    y = num.zeros(number_of_points, num.float)  #Northing
3233
3234    if verbose: print 'Making triangular grid'
3235
3236    # Check zone boundaries
3237    refzone, _, _ = redfearn(latitudes[0], longitudes[0])
3238
3239    vertices = {}
3240    i = 0
3241    for k, lat in enumerate(latitudes):       # Y direction
3242        for l, lon in enumerate(longitudes):  # X direction
3243            vertices[l,k] = i
3244
3245            zone, easting, northing = redfearn(lat,lon)
3246
3247            #msg = 'Zone boundary crossed at longitude =', lon
3248            #assert zone == refzone, msg
3249            #print '%7.2f %7.2f %8.2f %8.2f' %(lon, lat, easting, northing)
3250            x[i] = easting
3251            y[i] = northing
3252            i += 1
3253
3254    #Construct 2 triangles per 'rectangular' element
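    # Each rectangular cell between grid points (l, k) and (l+1, k+1) is
    # split along its v2-v3 diagonal:
    #
    #     v1 (l, k+1)    v3 (l+1, k+1)
    #        +-----------+
    #        |  upper  / |
    #        |       /   |
    #        |     /     |
    #        |   /       |
    #        | /   lower |
    #        +-----------+
    #     v2 (l, k)      v4 (l+1, k)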
3255    volumes = []
3256    for l in range(number_of_longitudes-1):    # X direction
3257        for k in range(number_of_latitudes-1): # Y direction
3258            v1 = vertices[l,k+1]
3259            v2 = vertices[l,k]
3260            v3 = vertices[l+1,k+1]
3261            v4 = vertices[l+1,k]
3262
3263            volumes.append([v1,v2,v3]) #Upper element
3264            volumes.append([v4,v3,v2]) #Lower element
3265
3266    volumes = num.array(volumes, num.int)      #array default#
3267
3268    if origin is None:
3269        origin = Geo_reference(refzone, min(x), min(y))
3270    geo_ref = write_NetCDF_georeference(origin, outfile)
3271
3272    if elevation is not None:
3273        z = elevation
3274    else:
3275        if inverted_bathymetry:
3276            z = -1 * elevations
3277        else:
3278            z = elevations
3279    #FIXME: z should be obtained from MOST and passed in here
3280
3281    #FIXME use the Write_sww instance(sww) to write this info
3282    z = num.resize(z, outfile.variables['z'][:].shape)
3283    outfile.variables['x'][:] = x - geo_ref.get_xllcorner()
3284    outfile.variables['y'][:] = y - geo_ref.get_yllcorner()
3285    outfile.variables['z'][:] = z             #FIXME HACK for backwards compat.
3286    outfile.variables['elevation'][:] = z
3287    outfile.variables['volumes'][:] = volumes.astype(num.int32) #For Opteron 64
3288
3289    #Time stepping
3290    stage = outfile.variables['stage']
3291    xmomentum = outfile.variables['xmomentum']
3292    ymomentum = outfile.variables['ymomentum']
3293
3294    if verbose: print 'Converting quantities'
3295
3296    n = len(times)
3297    for j in range(n):
3298        if verbose and j % ((n+10)/10) == 0: print '  Doing %d of %d' %(j, n)
3299
3300        i = 0
3301        for k in range(number_of_latitudes):      # Y direction
3302            for l in range(number_of_longitudes): # X direction
3303                w = zscale * amplitudes[j,k,l] / 100 + mean_stage
3304                stage[j,i] = w
3305                h = w - z[i]
3306                xmomentum[j,i] = uspeed[j,k,l]/100*h
3307                ymomentum[j,i] = vspeed[j,k,l]/100*h
3308                i += 1
3309
3310    #outfile.close()
3311
3312    #FIXME: Refactor using code from file_function.statistics
3313    #Something like print swwstats(swwname)
3314    if verbose:
3315        x = outfile.variables['x'][:]
3316        y = outfile.variables['y'][:]
3317        print '------------------------------------------------'
3318        print 'Statistics of output file:'
3319        print '  Name: %s' %swwname
3320        print '  Reference:'
3321        print '    Lower left corner: [%f, %f]' \
3322              % (geo_ref.get_xllcorner(), geo_ref.get_yllcorner())
3323        print '    Start time: %f' % starttime
3324        print '    Min time: %f' %mint
3325        print '    Max time: %f' %maxt
3326        print '  Extent:'
3327        print '    x [m] in [%f, %f], len(x) == %d' \
3328              % (num.min(x), num.max(x), len(x.flat))
3329        print '    y [m] in [%f, %f], len(y) == %d' \
3330              % (num.min(y), num.max(y), len(y.flat))
3331        print '    t [s] in [%f, %f], len(t) == %d' \
3332              % (min(times), max(times), len(times))
3333        print '  Quantities [SI units]:'
3334        for name in ['stage', 'xmomentum', 'ymomentum', 'elevation']:
3335            q = outfile.variables[name][:].flatten()
3336            print '    %s in [%f, %f]' % (name, min(q), max(q))
3337
3338    outfile.close()
3339
3340
3341##
3342# @brief Convert time-series text file to TMS file.
3343# @param filename
3344# @param quantity_names
3345# @param time_as_seconds
3346def timefile2netcdf(filename, quantity_names=None, time_as_seconds=False):
3347    """Template for converting typical text files with time series to
3348    NetCDF tms file.
3349
3350    The file format is assumed to be two fields separated by a comma; either:
3351
3352        time [DD/MM/YY hh:mm:ss], value0 value1 value2 ...
3353
3354    E.g.
3355
3356      31/08/04 00:00:00, 1.328223 0 0
3357      31/08/04 00:15:00, 1.292912 0 0
3358
3359    or time (seconds), value0 value1 value2 ...
3360
3361      0.0, 1.328223 0 0
3362      0.1, 1.292912 0 0
3363
3364    Either form provides a time dependent function f(t) with three attributes.
3365
3366    filename is assumed to be the rootname, with extensions .txt (input) and .tms (output)
3367    """
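    # Illustrative use (a sketch; 'gauge' and the quantity name are
    # hypothetical):
    #
    #     timefile2netcdf('gauge', quantity_names=['stage'])
    #
    # reads gauge.txt and writes gauge.tms with a NetCDF variable 'stage'
    # indexed by 'number_of_timesteps'.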
3368
3369    import time, calendar
3370    from anuga.config import time_format
3371    from anuga.utilities.numerical_tools import ensure_numeric
3372
3373    file_text = filename + '.txt'
3374    fid = open(file_text)
3375    line = fid.readline()
3376    fid.close()
3377
3378    fields = line.split(',')
3379    msg = "File %s must have the format 'datetime, value0 value1 value2 ...'" \
3380          % file_text
3381    assert len(fields) == 2, msg
3382
3383    if not time_as_seconds:
3384        try:
3385            starttime = calendar.timegm(time.strptime(fields[0], time_format))
3386        except ValueError:
3387            msg = 'First field in file %s must be' % file_text
3388            msg += ' date-time with format %s.\n' % time_format
3389            msg += 'I got %s instead.' % fields[0]
3390            raise DataTimeError, msg
3391    else:
3392        try:
3393            starttime = float(fields[0])
3394        except ValueError:
3395            msg = 'Bad time format: expected time in seconds, got %s' % fields[0]
3396            raise DataTimeError, msg
3397
3398    # Split values
3399    values = []
3400    for value in fields[1].split():
3401        values.append(float(value))
3402
3403    q = ensure_numeric(values)
3404
3405    msg = 'ERROR: File must contain at least one independent value'
3406    assert len(q.shape) == 1, msg
3407
3408    # Read times proper
3411
3412    fid = open(file_text)
3413    lines = fid.readlines()
3414    fid.close()
3415
3416    N = len(lines)
3417    d = len(q)
3418
3419    T = num.zeros(N, num.float)       # Time
3420    Q = num.zeros((N, d), num.float)  # Values
3421
3422    for i, line in enumerate(lines):
3423        fields = line.split(',')
3424        if not time_as_seconds:
3425            realtime = calendar.timegm(time.strptime(fields[0], time_format))
3426        else:
3427            realtime = float(fields[0])
3428        T[i] = realtime - starttime
3429
3430        for j, value in enumerate(fields[1].split()):
3431            Q[i, j] = float(value)
3432
3433    msg = 'File %s must list time as a monotonically ' % filename
3434    msg += 'increasing sequence'
3435    assert num.alltrue(T[1:] - T[:-1] > 0), msg
3436
3437    #Create NetCDF file
3438    from Scientific.IO.NetCDF import NetCDFFile
3439
3440    fid = NetCDFFile(filename + '.tms', netcdf_mode_w)
3441
3442    fid.institution = 'Geoscience Australia'
3443    fid.description = 'Time series'
3444
3445    #Reference point
3446    #Start time in seconds since the epoch (midnight 1/1/1970)
3447    #FIXME: Use Georef
3448    fid.starttime = starttime
3449
3450    # dimension definitions
3451    #fid.createDimension('number_of_volumes', self.number_of_volumes)
3452    #fid.createDimension('number_of_vertices', 3)
3453
3454    fid.createDimension('number_of_timesteps', len(T))
3455
3456    fid.createVariable('time', netcdf_float, ('number_of_timesteps',))
3457
3458    fid.variables['time'][:] = T
3459
3460    for i in range(Q.shape[1]):
3461        try:
3462            name = quantity_names[i]
3463        except (TypeError, IndexError):
3464            name = 'Attribute%d' % i
3465
3466        fid.createVariable(name, netcdf_float, ('number_of_timesteps',))
3467        fid.variables[name][:] = Q[:,i]
3468
3469    fid.close()
3470
3471
3472##
3473# @brief Get the extents of a NetCDF data file.
3474# @param file_name The path to the SWW file.
3475# @return A list of x, y, z and stage limits (min, max).
3476def extent_sww(file_name):
3477    """Read in an sww file.
3478
3479    Input:
3480    file_name - the sww file
3481
3482    Output:
3483    A list: [min(x),max(x),min(y),max(y),min(stage.flat),max(stage.flat)]
3484    """
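    # For example (hypothetical file name):
    #
    #     xmin, xmax, ymin, ymax, stage_min, stage_max = extent_sww('model.sww')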
3485
3486    from Scientific.IO.NetCDF import NetCDFFile
3487
3488    #Get NetCDF
3489    fid = NetCDFFile(file_name, netcdf_mode_r)
3490
3491    # Get the variables
3492    x = fid.variables['x'][:]
3493    y = fid.variables['y'][:]
3494    stage = fid.variables['stage'][:]
3495
3496    fid.close()
3497
3498    return [min(x), max(x), min(y), max(y), num.min(stage), num.max(stage)]
3499
3500
3501##
3502# @brief
3503# @param filename
3504# @param boundary
3505# @param t
3506# @param fail_if_NaN
3507# @param NaN_filler
3508# @param verbose
3509# @param very_verbose
3510# @return
3511def sww2domain(filename, boundary=None, t=None,
3512               fail_if_NaN=True, NaN_filler=0,
3513               verbose=False, very_verbose=False):
3514    """
3515    Usage: domain = sww2domain('file.sww',t=time (default = last time in file))
3516
3517    Boundary is not recommended if domain.smooth is not selected, as it
3518    uses unique coordinates, but not unique boundaries. This means that
3519    the boundary file will not be compatible with the coordinates, and will
3520    give a different final boundary, or crash.
3521    """
3522
3523    from Scientific.IO.NetCDF import NetCDFFile
3524    from shallow_water import Domain
3525
3526    # initialise NaN.
3527    NaN = 9.969209968386869e+036
3528
3529    if verbose: print 'Reading from ', filename
3530
3531    fid = NetCDFFile(filename, netcdf_mode_r)    # Open existing file for read
3532    time = fid.variables['time']       # Timesteps
3533    if t is None:
3534        t = time[-1]
3535    time_interp = get_time_interp(time,t)
3536
3537    # Get the variables as numeric arrays
3538    x = fid.variables['x'][:]                   # x-coordinates of vertices
3539    y = fid.variables['y'][:]                   # y-coordinates of vertices
3540    elevation = fid.variables['elevation']      # Elevation
3541    stage = fid.variables['stage']              # Water level
3542    xmomentum = fid.variables['xmomentum']      # Momentum in the x-direction
3543    ymomentum = fid.variables['ymomentum']      # Momentum in the y-direction
3544
3545    starttime = fid.starttime[0]
3546    volumes = fid.variables['volumes'][:]       # Connectivity
3547    coordinates = num.transpose(num.asarray([x.tolist(), y.tolist()]))
3548    # FIXME (Ole): Something like this might be better:
3549    #                 concatenate((x, y), axis=1)
3550    # or              concatenate((x[:,num.newaxis], x[:,num.newaxis]), axis=1)
3551
3552    conserved_quantities = []
3553    interpolated_quantities = {}
3554    other_quantities = []
3555
3556    # get geo_reference
3557    try:                             # sww files don't have to have a geo_ref
3558        geo_reference = Geo_reference(NetCDFObject=fid)
3559    except AttributeError:
3560        geo_reference = None
3561
3562    if verbose: print '    getting quantities'
3563
3564    for quantity in fid.variables.keys():
3565        dimensions = fid.variables[quantity].dimensions
3566        if 'number_of_timesteps' in dimensions:
3567            conserved_quantities.append(quantity)
3568            interpolated_quantities[quantity] = \
3569                  interpolated_quantity(fid.variables[quantity][:], time_interp)
3570        else:
3571            other_quantities.append(quantity)
3572
3573    other_quantities.remove('x')
3574    other_quantities.remove('y')
3575    other_quantities.remove('z')
3576    other_quantities.remove('volumes')
3577    try:
3578        other_quantities.remove('stage_range')
3579        other_quantities.remove('xmomentum_range')
3580        other_quantities.remove('ymomentum_range')
3581        other_quantities.remove('elevation_range')
3582    except ValueError:
3583        pass
3584
3585    conserved_quantities.remove('time')
3586
3587    if verbose: print '    building domain'
3588
3589    #    From domain.Domain:
3590    #    domain = Domain(coordinates, volumes,\
3591    #                    conserved_quantities = conserved_quantities,\
3592    #                    other_quantities = other_quantities,zone=zone,\
3593    #                    xllcorner=xllcorner, yllcorner=yllcorner)
3594
3595    # From shallow_water.Domain:
3596    coordinates = coordinates.tolist()
3597    volumes = volumes.tolist()
3598    # FIXME:should this be in mesh? (peter row)
3599    if fid.smoothing == 'Yes':
3600        unique = False
3601    else:
3602        unique = True
3603    if unique:
3604        coordinates, volumes, boundary = weed(coordinates, volumes,boundary)
3605
3606    try:
3607        domain = Domain(coordinates, volumes, boundary)
3608    except AssertionError, e:
3609        fid.close()
3610        msg = 'Domain could not be created: %s. ' \
3611              'Perhaps use "fail_if_NaN=False and NaN_filler = ..."' % e
3612        raise DataDomainError, msg
3613
3614    if not boundary is None:
3615        domain.boundary = boundary
3616
3617    domain.geo_reference = geo_reference
3618
3619    domain.starttime = float(starttime) + float(t)
3620    domain.time = 0.0
3621
3622    for quantity in other_quantities:
3623        try:
3624            NaN = fid.variables[quantity].missing_value
3625        except AttributeError:
3626            pass                       # quantity has no missing_value number
3627        X = fid.variables[quantity][:]
3628        if very_verbose:
3629            print '       ', quantity
3630            print '        NaN =', NaN
3631            print '        max(X)'
3632            print '       ', max(X)
3633            print '        max(X)==NaN'
3634            print '       ', max(X)==NaN
3635            print ''
3636        if max(X) == NaN or min(X) == NaN:
3637            if fail_if_NaN:
3638                msg = 'quantity "%s" contains no_data entry' % quantity
3639                raise DataMissingValuesError, msg
3640            else:
3641                data = (X != NaN)
3642                X = (X*data) + (data==0)*NaN_filler
3643        if unique:
3644            X = num.resize(X, (len(X)/3, 3))
3645        domain.set_quantity(quantity, X)
3646    #
3647    for quantity in conserved_quantities:
3648        try:
3649            NaN = fid.variables[quantity].missing_value
3650        except AttributeError:
3651            pass                       # quantity has no missing_value number
3652        X = interpolated_quantities[quantity]
3653        if very_verbose:
3654            print '       ',quantity
3655            print '        NaN =', NaN
3656            print '        max(X)'
3657            print '       ', max(X)
3658            print '        max(X)==NaN'
3659            print '       ', max(X)==NaN
3660            print ''
3661        if max(X) == NaN or min(X) == NaN:
3662            if fail_if_NaN:
3663                msg = 'quantity "%s" contains no_data entry' % quantity
3664                raise DataMissingValuesError, msg
3665            else:
3666                data = (X != NaN)
3667                X = (X*data) + (data==0)*NaN_filler
3668        if unique:
3669            X = num.resize(X, (X.shape[0]/3, 3))
3670        domain.set_quantity(quantity, X)
3671
3672    fid.close()
3673
3674    return domain
3675
3676
3677##
3678# @brief Interpolate a quantity wrt time.
3679# @param saved_quantity The quantity to interpolate.
3680# @param time_interp (index, ratio)
3681# @return A vector of interpolated values.
3682def interpolated_quantity(saved_quantity, time_interp):
3683    '''Given an index and ratio, interpolate quantity with respect to time.'''
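    # For example, with time_interp == (2, 0.25) the result is
    # 0.75*Q[2] + 0.25*Q[3], i.e. a quarter of the way from time
    # slice 2 towards time slice 3.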
3684
3685    index, ratio = time_interp
3686
3687    Q = saved_quantity
3688
3689    if ratio > 0:
3690        q = (1-ratio)*Q[index] + ratio*Q[index+1]
3691    else:
3692        q = Q[index]
3693
3694    #Return vector of interpolated values
3695    return q
3696
3697
3698##
3699# @brief
3700    # @param time
3701    # @param t
3702    # @return An (index, ratio) tuple.
3703def get_time_interp(time, t=None):
3704    #Finds the ratio and index for time interpolation.
3705    #It is borrowed from previous abstract_2d_finite_volumes code.
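    # For example, get_time_interp([0.0, 10.0, 20.0], 15.0) == (1, 0.5),
    # i.e. halfway between time[1] and time[2].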
3706    if t is None:
3707        t=time[-1]
3708        index = -1
3709        ratio = 0.
3710    else:
3711        T = time
3712        tau = t
3713        index=0
3714        msg = 'Time interval derived from file %s [%s:%s]' \
3715              % ('FIXMEfilename', T[0], T[-1])
3716        msg += ' does not match model time: %s' % tau
3717        if tau < time[0]: raise DataTimeError, msg
3718        if tau > time[-1]: raise DataTimeError, msg
3719        while tau > time[index]: index += 1
3720        while tau < time[index]: index -= 1
3721        if tau == time[index]:
3722            #Protect against case where tau == time[-1] (last time)
3723            # - also works in general when tau == time[i]
3724            ratio = 0
3725        else:
3726            #t is now between index and index+1
3727            ratio = (tau - time[index])/(time[index+1] - time[index])
3728
3729    return (index, ratio)
3730
3731
3732##
3733# @brief
3734# @param coordinates
3735# @param volumes
3736# @param boundary
3737def weed(coordinates, volumes, boundary=None):
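    """Weed out duplicated coordinates and remap volumes (and boundary,
    if given) to reference the reduced point set.

    Illustrative sketch (input values are hypothetical):

        coordinates = [[0.0, 0.0], [1.0, 0.0], [0.0, 0.0]]
        volumes = [[0, 1, 2]]
        coordinates, volumes, boundary = weed(coordinates, volumes)

    The two copies of [0.0, 0.0] are merged into one point and the volume
    indices are renumbered to match.
    """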
3738    if isinstance(coordinates, num.ndarray):
3739        coordinates = coordinates.tolist()
3740    if isinstance(volumes, num.ndarray):
3741        volumes = volumes.tolist()
3742
3743    unique = False
3744    point_dict = {}
3745    same_point = {}
3746    for i in range(len(coordinates)):
3747        point = tuple(coordinates[i])
3748        if point_dict.has_key(point):
3749            unique = True
3750            same_point[i] = point
3751            #to change all point i references to point j
3752        else:
3753            point_dict[point] = i
3754            same_point[i] = point
3755
3756    coordinates = []
3757    i = 0
3758    for point in point_dict.keys():
3759        point = tuple(point)
3760        coordinates.append(list(point))
3761        point_dict[point] = i
3762        i += 1
3763
3764    for volume in volumes:
3765        for i in range(len(volume)):
3766            index = volume[i]
3767            if index > -1:
3768                volume[i] = point_dict[same_point[index]]
3769
3770    new_boundary = {}
3771    if not boundary is None:
3772        for segment in boundary.keys():
3773            point0 = point_dict[same_point[segment[0]]]
3774            point1 = point_dict[same_point[segment[1]]]
3775            label = boundary[segment]
3776            #FIXME should the boundary attributes be concatenated
3777            #('exterior, pond') or replaced ('pond')? (peter row)
3778
3779            if new_boundary.has_key((point0, point1)):
3780                new_boundary[(point0,point1)] = new_boundary[(point0, point1)]
3781
3782            elif new_boundary.has_key((point1, point0)):
3783                new_boundary[(point1,point0)] = new_boundary[(point1, point0)]
3784            else: new_boundary[(point0, point1)] = label
3785
3786        boundary = new_boundary
3787
3788    return coordinates, volumes, boundary
3789
3790
3791##
3792# @brief Read DEM file, decimate data, write new DEM file.
3793# @param basename_in Stem of the input filename.
3794# @param stencil
3795# @param cellsize_new New cell size to resample on.
3796# @param basename_out Stem of the output filename.
3797# @param verbose True if this function is to be verbose.
3798def decimate_dem(basename_in, stencil, cellsize_new, basename_out=None,
3799                 verbose=False):
3800    """Read Digital Elevation model from the following NetCDF format (.dem)
3801
3802    Example:
3803
3804    ncols         3121
3805    nrows         1800
3806    xllcorner     722000
3807    yllcorner     5893000
3808    cellsize      25
3809    NODATA_value  -9999
3810    138.3698 137.4194 136.5062 135.5558 ..........
3811
3812    Decimate data to cellsize_new using stencil and write to NetCDF dem format.
3813    """
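    # Illustrative call (a sketch; 'topo' is a hypothetical basename).
    # A 3x3 averaging stencil resamples a 25m DEM onto a 100m grid:
    #
    #     stencil = num.ones((3, 3), num.float) / 9.0
    #     decimate_dem('topo', stencil, cellsize_new=100)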
3814
3815    import os
3816    from Scientific.IO.NetCDF import NetCDFFile
3817
3818    root = basename_in
3819    inname = root + '.dem'
3820
3821    #Open existing netcdf file to read
3822    infile = NetCDFFile(inname, netcdf_mode_r)
3823
3824    if verbose: print 'Reading DEM from %s' % inname
3825
3826    #Read metadata
3827    ncols = infile.ncols[0]
3828    nrows = infile.nrows[0]
3829    xllcorner = infile.xllcorner[0]
3830    yllcorner = infile.yllcorner[0]
3831    cellsize = infile.cellsize[0]
3832    NODATA_value = infile.NODATA_value[0]
3833    zone = infile.zone[0]
3834    false_easting = infile.false_easting[0]
3835    false_northing = infile.false_northing[0]
3836    projection = infile.projection
3837    datum = infile.datum
3838    units = infile.units
3839
3840    dem_elevation = infile.variables['elevation']
3841
3842    #Get output file name
3843    if basename_out == None:
3844        outname = root + '_' + repr(cellsize_new) + '.dem'
3845    else:
3846        outname = basename_out + '.dem'
3847
3848    if verbose: print 'Write decimated NetCDF file to %s' % outname
3849
3850    #Determine some dimensions for decimated grid
3851    (nrows_stencil, ncols_stencil) = stencil.shape
3852    x_offset = ncols_stencil / 2
3853    y_offset = nrows_stencil / 2
3854    cellsize_ratio = int(cellsize_new / cellsize)
3855    ncols_new = 1 + (ncols - ncols_stencil) / cellsize_ratio
3856    nrows_new = 1 + (nrows - nrows_stencil) / cellsize_ratio
3857
3858    #Open netcdf file for output
3859    outfile = NetCDFFile(outname, netcdf_mode_w)
3860
3861    #Create new file
3862    outfile.institution = 'Geoscience Australia'
3863    outfile.description = 'NetCDF DEM format for compact and portable ' \
3864                          'storage of spatial point data'
3865
3866    #Georeferencing
3867    outfile.zone = zone
3868    outfile.projection = projection
3869    outfile.datum = datum
3870    outfile.units = units
3871
3872    outfile.cellsize = cellsize_new
3873    outfile.NODATA_value = NODATA_value
3874    outfile.false_easting = false_easting
3875    outfile.false_northing = false_northing
3876
3877    outfile.xllcorner = xllcorner + (x_offset * cellsize)
3878    outfile.yllcorner = yllcorner + (y_offset * cellsize)
3879    outfile.ncols = ncols_new
3880    outfile.nrows = nrows_new
3881
3882    # dimension definition
3883    outfile.createDimension('number_of_points', nrows_new*ncols_new)
3884
3885    # variable definition
3886    outfile.createVariable('elevation', netcdf_float, ('number_of_points',))
3887
3888    # Get handle to the variable
3889    elevation = outfile.variables['elevation']
3890
3891    dem_elevation_r = num.reshape(dem_elevation, (nrows, ncols))
3892
3893    #Store data
3894    global_index = 0
3895    for i in range(nrows_new):
3896        if verbose: print 'Processing row %d of %d' %(i, nrows_new)
3897
3898        lower_index = global_index
3899        telev = num.zeros(ncols_new, num.float)
3900        local_index = 0
3901        trow = i * cellsize_ratio
3902
3903        for j in range(ncols_new):
3904            tcol = j * cellsize_ratio
3905            tmp = dem_elevation_r[trow:trow+nrows_stencil,
3906                                  tcol:tcol+ncols_stencil]
3907
3908            #if dem contains 1 or more NODATA_values set value in
3909            #decimated dem to NODATA_value, else compute decimated
3910            #value using stencil
3911            if num.sum(num.sum(num.equal(tmp, NODATA_value))) > 0:
3912                telev[local_index] = NODATA_value
3913            else:
3914                telev[local_index] = num.sum(num.sum(tmp * stencil))
3915
3916            global_index += 1
3917            local_index += 1
3918
3919        upper_index = global_index
3920
3921        elevation[lower_index:upper_index] = telev
3922
3923    assert global_index == nrows_new*ncols_new, \
3924           'index not equal to number of points'
3925
3926    infile.close()
3927    outfile.close()
3928
3929
3930##
3931# @brief
3932# @param filename
3933# @param verbose
3934def tsh2sww(filename, verbose=False):
3935    """
3936    Convert a tsh/msh file to sww, e.g. to check that the mesh 'looks' good.
3937    """
3938
3939    if verbose: print 'Creating domain from', filename
3940
3941    domain = pmesh_to_domain_instance(filename, Domain)
3942
3943    if verbose: print "Number of triangles = ", len(domain)
3944
3945    domain.smooth = True
3946    domain.format = 'sww'   #Native netcdf visualisation format
3947    file_path, filename = path.split(filename)
3948    filename, ext = path.splitext(filename)
3949    domain.set_name(filename)
3950    domain.reduction = mean
3951
3952    if verbose: print "file_path", file_path
3953
3954    if file_path == "":
3955        file_path = "."
3956    domain.set_datadir(file_path)
3957
3958    if verbose:
3959        print "Output written to " + domain.get_datadir() + sep + \
3960              domain.get_name() + "." + domain.format
3961
3962    sww = get_dataobject(domain)
3963    sww.store_connectivity()
3964    sww.store_timestep()
3965
3966
3967##
3968# @brief Convert CSIRO ESRI file to an SWW boundary file.
3969# @param bath_dir
3970# @param elevation_dir
3971# @param ucur_dir
3972# @param vcur_dir
3973# @param sww_file
3974# @param minlat
3975# @param maxlat
3976# @param minlon
3977# @param maxlon
3978# @param zscale
3979# @param mean_stage
3980# @param fail_on_NaN
3981# @param elevation_NaN_filler
3982# @param bath_prefix
3983# @param elevation_prefix
3984# @param verbose
3985# @note Also convert latitude and longitude to UTM. All coordinates are
3986#       assumed to be given in the GDA94 datum.
3987def asc_csiro2sww(bath_dir,
3988                  elevation_dir,
3989                  ucur_dir,
3990                  vcur_dir,
3991                  sww_file,
3992                  minlat=None, maxlat=None,
3993                  minlon=None, maxlon=None,
3994                  zscale=1,
3995                  mean_stage=0,
3996                  fail_on_NaN=True,
3997                  elevation_NaN_filler=0,
3998                  bath_prefix='ba',
3999                  elevation_prefix='el',
4000                  verbose=False):
4001    """
4002    Produce an sww boundary file, from esri ascii data from CSIRO.
4003
4004    Also convert latitude and longitude to UTM. All coordinates are
4005    assumed to be given in the GDA94 datum.
4006
4007    assume:
4008    All files are in esri ascii format
4009
4010    4 types of information
4011    bathymetry
4012    elevation
4013    u velocity
4014    v velocity
4015
4016    Assumptions
4017    The metadata of all the files is the same
4018    Each type is in a separate directory
4019    One bath file with extension .000
4020    The time period is less than 24hrs and uniform.
4021    """
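    # Illustrative call (a sketch; directory and file names are hypothetical):
    #
    #     asc_csiro2sww('bathys', 'elevations', 'ucur', 'vcur',
    #                   'output.sww',
    #                   minlat=-34.5, maxlat=-34.0,
    #                   minlon=150.0, maxlon=150.5,
    #                   verbose=True)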
4022
4023    from Scientific.IO.NetCDF import NetCDFFile
4024
4025    from anuga.coordinate_transforms.redfearn import redfearn
4026
4027    precision = netcdf_float # So if we want to change the precision, it's done here
4028
4029    # go in to the bath dir and load the only file,
4030    bath_files = os.listdir(bath_dir)
4031    bath_file = bath_files[0]
4032    bath_dir_file =  bath_dir + os.sep + bath_file
4033    bath_metadata, bath_grid =  _read_asc(bath_dir_file)
4034
4035    #Use the date.time of the bath file as a basis for
4036    #the start time for other files
4037    base_start = bath_file[-12:]
4038
4039    #go into the elevation dir and load the 000 file
4040    elevation_dir_file = elevation_dir  + os.sep + elevation_prefix \
4041                         + base_start
4042
4043    elevation_files = os.listdir(elevation_dir)
4044    ucur_files = os.listdir(ucur_dir)
4045    vcur_files = os.listdir(vcur_dir)
4046    elevation_files.sort()
4047
4048    # the first elevation file should be the
4049    # file with the same base name as the bath data
4050    assert elevation_files[0] == 'el' + base_start
4051
4052    number_of_latitudes = bath_grid.shape[0]
4053    number_of_longitudes = bath_grid.shape[1]
4054    number_of_volumes = (number_of_latitudes-1) * (number_of_longitudes-1) * 2
4055
4056    longitudes = [bath_metadata['xllcorner'] + x*bath_metadata['cellsize'] \
4057                  for x in range(number_of_longitudes)]
4058    latitudes = [bath_metadata['yllcorner'] + y*bath_metadata['cellsize'] \
4059                 for y in range(number_of_latitudes)]
4060
4061    # reverse order of lat, so the first lat represents the first grid row
4062    latitudes.reverse()
4063
4064    kmin, kmax, lmin, lmax = _get_min_max_indexes(latitudes[:],longitudes[:],
4065                                                  minlat=minlat, maxlat=maxlat,
4066                                                  minlon=minlon, maxlon=maxlon)
4067
4068    bath_grid = bath_grid[kmin:kmax,lmin:lmax]
4069    latitudes = latitudes[kmin:kmax]
4070    longitudes = longitudes[lmin:lmax]
4071    number_of_latitudes = len(latitudes)
4072    number_of_longitudes = len(longitudes)
4073    number_of_times = len(os.listdir(elevation_dir))
4074    number_of_points = number_of_latitudes * number_of_longitudes
4075    number_of_volumes = (number_of_latitudes-1) * (number_of_longitudes-1) * 2
4076
4077    #Work out the times
4078    if len(elevation_files) > 1:
4079        # Assume: The time period is less than 24hrs.
4080        time_period = (int(elevation_files[1][-3:]) \
4081                       - int(elevation_files[0][-3:])) * 60*60
4082        times = [x*time_period for x in range(len(elevation_files))]
4083    else:
4084        times = [0.0]
4085
4086    if verbose:
4087        print '------------------------------------------------'
4088        print 'Statistics:'
4089        print '  Extent (lat/lon):'
4090        print '    lat in [%f, %f], len(lat) == %d' \
4091              % (min(latitudes), max(latitudes), len(latitudes))
4092        print '    lon in [%f, %f], len(lon) == %d' \
4093              % (min(longitudes), max(longitudes), len(longitudes))
4094        print '    t in [%f, %f], len(t) == %d' \
4095              % (min(times), max(times), len(times))
4096
4097    ######### WRITE THE SWW FILE #############
4098
4099    # NetCDF file definition
4100    outfile = NetCDFFile(sww_file, netcdf_mode_w)
4101
4102    #Create new file
4103    outfile.institution = 'Geoscience Australia'
4104    outfile.description = 'Converted from XXX'
4105
4106    #For sww compatibility
4107    outfile.smoothing = 'Yes'
4108    outfile.order = 1
4109
4110    #Start time in seconds since the epoch (midnight 1/1/1970)
4111    outfile.starttime = starttime = times[0]
4112
4113    # dimension definitions
4114    outfile.createDimension('number_of_volumes', number_of_volumes)
4115    outfile.createDimension('number_of_vertices', 3)
4116    outfile.createDimension('number_of_points', number_of_points)
4117    outfile.createDimension('number_of_timesteps', number_of_times)
4118
4119    # variable definitions
4120    outfile.createVariable('x', precision, ('number_of_points',))
4121    outfile.createVariable('y', precision, ('number_of_points',))
4122    outfile.createVariable('elevation', precision, ('number_of_points',))
4123
4124    #FIXME: Backwards compatibility
4125    outfile.createVariable('z', precision, ('number_of_points',))
4126    #################################
4127
4128    outfile.createVariable('volumes', netcdf_int, ('number_of_volumes',
4129                                                   'number_of_vertices'))
4130
4131    outfile.createVariable('time', precision, ('number_of_timesteps',))
4132
4133    outfile.createVariable('stage', precision, ('number_of_timesteps',
4134                                                'number_of_points'))
4135
4136    outfile.createVariable('xmomentum', precision, ('number_of_timesteps',
4137                                                    'number_of_points'))
4138
4139    outfile.createVariable('ymomentum', precision, ('number_of_timesteps',
4140                                                    'number_of_points'))
4141
4142    #Store
4143    from anuga.coordinate_transforms.redfearn import redfearn
4144
4145    x = num.zeros(number_of_points, num.float)  #Easting
4146    y = num.zeros(number_of_points, num.float)  #Northing
4147
4148    if verbose: print 'Making triangular grid'
4149
4150    #Get zone of 1st point.
4151    refzone, _, _ = redfearn(latitudes[0], longitudes[0])
4152
4153    vertices = {}
4154    i = 0
4155    for k, lat in enumerate(latitudes):
4156        for l, lon in enumerate(longitudes):
4157            vertices[l,k] = i
4158
4159            zone, easting, northing = redfearn(lat, lon)
4160
4161            #msg = 'Zone boundary crossed at longitude =', lon
4162            #assert zone == refzone, msg
4163            #print '%7.2f %7.2f %8.2f %8.2f' %(lon, lat, easting, northing)
4164            x[i] = easting
4165            y[i] = northing
4166            i += 1
4167
4168    #Construct 2 triangles per 'rectangular' element
4169    volumes = []
4170    for l in range(number_of_longitudes-1):    #X direction
4171        for k in range(number_of_latitudes-1): #Y direction
4172            v1 = vertices[l,k+1]
4173            v2 = vertices[l,k]
4174            v3 = vertices[l+1,k+1]
4175            v4 = vertices[l+1,k]
4176
4177            #Note, this is different to the ferret2sww code
4178            #since the order of the lats is reversed.
4179            volumes.append([v1,v3,v2]) #Upper element
4180            volumes.append([v4,v2,v3]) #Lower element
4181
4182    volumes = num.array(volumes, num.int)      #array default#
4183
4184    geo_ref = Geo_reference(refzone, min(x), min(y))
4185    geo_ref.write_NetCDF(outfile)
4186
4187    # This will put the geo ref in the middle
4188    #geo_ref = Geo_reference(refzone, (max(x)+min(x))/2., (max(x)+min(y))/2.)
4189
4190    if verbose:
4191        print '------------------------------------------------'
4192        print 'More Statistics:'
4193        print '  Extent (x/y):'
4194        print '    x in [%f, %f], len(x) == %d' \
4195              % (min(x), max(x), len(x))
4196        print '    y in [%f, %f], len(y) == %d' \
4197              % (min(y), max(y), len(y))
4198        print 'geo_ref: ', geo_ref
4199
4200    z = num.resize(bath_grid,outfile.variables['z'][:].shape)
4201    outfile.variables['x'][:] = x - geo_ref.get_xllcorner()
4202    outfile.variables['y'][:] = y - geo_ref.get_yllcorner()
4203# FIXME (Ole): Remove once viewer has been recompiled and changed
4204#              to use elevation instead of z
4205    outfile.variables['z'][:] = z
4206    outfile.variables['elevation'][:] = z
4207    outfile.variables['volumes'][:] = volumes.astype(num.int32) # On Opteron 64
4208
4209    stage = outfile.variables['stage']
4210    xmomentum = outfile.variables['xmomentum']
4211    ymomentum = outfile.variables['ymomentum']
4212
4213    outfile.variables['time'][:] = times   #Store time relative
4214
4215    if verbose: print 'Converting quantities'
4216
4217    n = number_of_times
4218    for j in range(number_of_times):
4219        # load in files
4220        elevation_meta, elevation_grid = \
4221            _read_asc(elevation_dir + os.sep + elevation_files[j])
4222
4223        _, u_momentum_grid = _read_asc(ucur_dir + os.sep + ucur_files[j])
4224        _, v_momentum_grid = _read_asc(vcur_dir + os.sep + vcur_files[j])
4225
4226        #cut matrix to desired size
4227        elevation_grid = elevation_grid[kmin:kmax,lmin:lmax]
4228        u_momentum_grid = u_momentum_grid[kmin:kmax,lmin:lmax]
4229        v_momentum_grid = v_momentum_grid[kmin:kmax,lmin:lmax]
4230
4231        # handle missing values
4232        missing = (elevation_grid == elevation_meta['NODATA_value'])
4233        if num.sometrue (missing):
4234            if fail_on_NaN:
4235                msg = 'File %s contains missing values' \
4236                      % (elevation_files[j])
4237                raise DataMissingValuesError, msg
4238            else:
4239                elevation_grid = elevation_grid*(missing==0) \
4240                                 + missing*elevation_NaN_filler
4241
4242        if verbose and j % ((n+10)/10) == 0: print '  Doing %d of %d' % (j, n)
4243
4244        i = 0
4245        for k in range(number_of_latitudes):      #Y direction
4246            for l in range(number_of_longitudes): #X direction
4247                w = zscale*elevation_grid[k,l] + mean_stage
4248                stage[j,i] = w
4249                h = w - z[i]
4250                xmomentum[j,i] = u_momentum_grid[k,l]*h
4251                ymomentum[j,i] = v_momentum_grid[k,l]*h
4252                i += 1
4253
4254    outfile.close()
4255
4256
4257##
4258# @brief Return max&min indexes (for slicing) of area specified.
4259# @param latitudes_ref ??
4260# @param longitudes_ref ??
4261# @param minlat Minimum latitude of specified area.
4262# @param maxlat Maximum latitude of specified area.
4263# @param minlon Minimum longitude of specified area.
4264# @param maxlon Maximum longitude of specified area.
4265# @return Tuple (lat_min_index, lat_max_index, lon_min_index, lon_max_index)
4266def _get_min_max_indexes(latitudes_ref,longitudes_ref,
4267                         minlat=None, maxlat=None,
4268                         minlon=None, maxlon=None):
4269    """
4270    Return max, min indexes (for slicing) of the lat and long arrays to cover
4271    the area specified with min/max lat/long.
4272
4273    Think of the latitudes and longitudes describing a 2d surface.
4274    The area returned is, if possible, just big enough to cover the
4275    inputed max/min area. (This will not be possible if the max/min area
4276    has a section outside of the latitudes/longitudes area.)
4277
4278    Assumes longitudes are sorted,
4279    long - from low to high (west to east, e.g. 148 - 151)
4280    Assumes latitudes are sorted, ascending or descending
4281    """
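    # Worked example: with latitudes == [-35.0, -34.5, -34.0, -33.5],
    # minlat == -34.6 and maxlat == -34.1 the returned latitude indexes
    # are (0, 3); the slice latitudes[0:3] == [-35.0, -34.5, -34.0] is
    # the smallest one covering [-34.6, -34.1].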
4282
4283    latitudes = latitudes_ref[:]
4284    longitudes = longitudes_ref[:]
4285
4286    latitudes = ensure_numeric(latitudes)
4287    longitudes = ensure_numeric(longitudes)
4288
4289    assert num.allclose(num.sort(longitudes), longitudes)
4290
4291    #print latitudes[0],longitudes[0]
4292    #print len(latitudes),len(longitudes)
4293    #print latitudes[len(latitudes)-1],longitudes[len(longitudes)-1]
4294
4295    lat_ascending = True
4296    if not num.allclose(num.sort(latitudes), latitudes):
4297        lat_ascending = False
4298        # reverse order of lat, so it's in ascending order
4299        latitudes = latitudes[::-1]
4300        assert num.allclose(num.sort(latitudes), latitudes)
4301
4302    largest_lat_index = len(latitudes)-1
4303
4304    #Cut out a smaller extent.
4305    if minlat is None:
4306        lat_min_index = 0
4307    else:
4308        lat_min_index = num.searchsorted(latitudes, minlat) - 1
4309        if lat_min_index < 0:
4310            lat_min_index = 0
4311
4312    if maxlat is None:
4313        lat_max_index = largest_lat_index #len(latitudes)
4314    else:
4315        lat_max_index = num.searchsorted(latitudes, maxlat)
4316        if lat_max_index > largest_lat_index:
4317            lat_max_index = largest_lat_index
4318
4319    if minlon is None:
4320        lon_min_index = 0
4321    else:
4322        lon_min_index = num.searchsorted(longitudes, minlon) - 1
4323        if lon_min_index < 0:
4324            lon_min_index = 0
4325
4326    if maxlon is None:
4327        lon_max_index = len(longitudes)
4328    else:
4329        lon_max_index = num.searchsorted(longitudes, maxlon)
4330
4331    # Reversing the indexes, if the lat array is descending
4332    if lat_ascending is False:
4333        lat_min_index, lat_max_index = largest_lat_index - lat_max_index, \
4334                                       largest_lat_index - lat_min_index
4335    lat_max_index = lat_max_index + 1 # taking into account how slicing works
4336    lon_max_index = lon_max_index + 1 # taking into account how slicing works
4337
4338    return lat_min_index, lat_max_index, lon_min_index, lon_max_index
4339
4340
4341##
4342# @brief Read an ASC file.
4343    # @param filename Path to the file to read.
4344# @param verbose True if this function is to be verbose.
4345def _read_asc(filename, verbose=False):
4346    """Read esri file from the following ASCII format (.asc)
4347
4348    Example:
4349
4350    ncols         3121
4351    nrows         1800
4352    xllcorner     722000
4353    yllcorner     5893000
4354    cellsize      25
4355    NODATA_value  -9999
4356    138.3698 137.4194 136.5062 135.5558 ..........
4357    """
4358
4359    datafile = open(filename)
4360
4361    if verbose: print 'Reading DEM from %s' % filename
4362
4363    lines = datafile.readlines()
4364    datafile.close()
4365
4366    if verbose: print 'Got', len(lines), ' lines'
4367
4368    ncols = int(lines.pop(0).split()[1].strip())
4369    nrows = int(lines.pop(0).split()[1].strip())
4370    xllcorner = float(lines.pop(0).split()[1].strip())
4371    yllcorner = float(lines.pop(0).split()[1].strip())
4372    cellsize = float(lines.pop(0).split()[1].strip())
4373    NODATA_value = float(lines.pop(0).split()[1].strip())
4374
4375    assert len(lines) == nrows
4376
4377    #Store data
4378    grid = []
4379
4380    n = len(lines)
4381    for i, line in enumerate(lines):
4382        cells = line.split()
4383        assert len(cells) == ncols
4384        grid.append(num.array([float(x) for x in cells]))
4385    grid = num.array(grid)
4386
4387    return {'xllcorner':xllcorner,
4388            'yllcorner':yllcorner,
4389            'cellsize':cellsize,
4390            'NODATA_value':NODATA_value}, grid
4391
4392
4393    ####  URS 2 SWW  ###
4394
4395# Definitions of various NetCDF dimension names, etc.
4396lon_name = 'LON'
4397lat_name = 'LAT'
4398time_name = 'TIME'
4399    precision = netcdf_float # So if we want to change the precision, it's done here
4400
4401##
4402    # @brief Class for a NetCDF data file writer.
4403class Write_nc:
4404    """Write an nc file.
4405
4406    Note, this should be checked to meet cdc netcdf conventions for gridded
4407    data. http://www.cdc.noaa.gov/cdc/conventions/cdc_netcdf_standard.shtml
4408    """
4409
4410    ##
4411    # @brief Instantiate a Write_nc instance.
4412    # @param quantity_name
4413    # @param file_name
4414    # @param time_step_count The number of time steps.
4415    # @param time_step The time_step size.
4416    # @param lon
4417    # @param lat
4418    def __init__(self,
4419                 quantity_name,
4420                 file_name,
4421                 time_step_count,
4422                 time_step,
4423                 lon,
4424                 lat):
4425        """Instantiate a Write_nc instance (NetCDF file writer).
4426
4427        time_step_count is the number of time steps.
4428        time_step is the time step size
4429
4430        pre-condition: quantity_name must be 'HA', 'UA' or 'VA'.
4431        """
4432
4433        self.quantity_name = quantity_name
4434        quantity_units = {'HA':'CENTIMETERS',
4435                          'UA':'CENTIMETERS/SECOND',
4436                          'VA':'CENTIMETERS/SECOND'}
4437
4438        multiplier_dic = {'HA':100.0,   # To convert from m to cm
4439                          'UA':100.0,   #             and m/s to cm/sec
4440                          'VA':-100.0}  # MUX files have positive x in the
4441                                        # Southern direction.  This corrects
4442                                        # for it, when writing nc files.
4443
4444        self.quantity_multiplier =  multiplier_dic[self.quantity_name]
4445
4446        #self.file_name = file_name
4447        self.time_step_count = time_step_count
4448        self.time_step = time_step
4449
4450        # NetCDF file definition
4451        self.outfile = NetCDFFile(file_name, netcdf_mode_w)
4452        outfile = self.outfile
4453
4454        #Create new file
4455        nc_lon_lat_header(outfile, lon, lat)
4456
4457        # TIME
4458        outfile.createDimension(time_name, None)
4459        outfile.createVariable(time_name, precision, (time_name,))
4460
4461        #QUANTITY
4462        outfile.createVariable(self.quantity_name, precision,
4463                               (time_name, lat_name, lon_name))
4464        outfile.variables[self.quantity_name].missing_value = -1.e+034
4465        outfile.variables[self.quantity_name].units = \
4466                                 quantity_units[self.quantity_name]
4467        outfile.variables[lon_name][:]= ensure_numeric(lon)
4468        outfile.variables[lat_name][:]= ensure_numeric(lat)
4469
4470        #Assume no one will be wanting to read this, while we are writing
4471        #outfile.close()
4472
4473    ##
4474    # @brief Write a time-step of quantity data.
4475    # @param quantity_slice The data to be stored for this time-step.
4476    def store_timestep(self, quantity_slice):
4477        """Write a time slice of quantity info
4478
4479        quantity_slice is the data to be stored at this time step
4480        """
4481
4482        # Get the variables
4483        time = self.outfile.variables[time_name]
4484        quantity = self.outfile.variables[self.quantity_name]
4485
4486        # Get index of slice to write
4487        i = len(time)
4488
4489        #Store time
4490        time[i] = i * self.time_step    #self.domain.time
4491        quantity[i,:] = quantity_slice * self.quantity_multiplier
4492
4493    ##
4494    # @brief Close file underlying the class instance.
4495    def close(self):
4496        self.outfile.close()
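
# Illustrative use of Write_nc (a sketch; the file name and values are
# hypothetical).  Two time steps of wave height are written in metres;
# store_timestep scales them to centimetres via quantity_multiplier:
#
#     nc = Write_nc('HA', 'test_ha.nc', time_step_count=2, time_step=0.5,
#                   lon=[150.0, 150.5], lat=[-34.5, -34.0])
#     nc.store_timestep(num.zeros((2, 2), num.float))   # t = 0.0
#     nc.store_timestep(num.ones((2, 2), num.float))    # t = 0.5
#     nc.close()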
4497
4498
4499##
4500# @brief Convert URS file to SWW file.
4501# @param basename_in Stem of the input filename.
4502# @param basename_out Stem of the output filename.
4503# @param verbose True if this function is to be verbose.
4504# @param remove_nc_files
4505# @param minlat Sets extent of area to be used.  If not supplied, full extent.
4506# @param maxlat Sets extent of area to be used.  If not supplied, full extent.
4507# @param minlon Sets extent of area to be used.  If not supplied, full extent.
4508# @param maxlon Sets extent of area to be used.  If not supplied, full extent.
4509# @param mint
4510# @param maxt
4511# @param mean_stage
4512# @param origin A 3-tuple with geo referenced UTM coordinates
4513# @param zscale
4514# @param fail_on_NaN
4515# @param NaN_filler
4516# @param elevation
4517# @note Also convert latitude and longitude to UTM. All coordinates are
4518#       assumed to be given in the GDA94 datum.
4519def urs2sww(basename_in='o', basename_out=None, verbose=False,
4520            remove_nc_files=True,
4521            minlat=None, maxlat=None,
4522            minlon=None, maxlon=None,
4523            mint=None, maxt=None,
4524            mean_stage=0,
4525            origin=None,
4526            zscale=1,
4527            fail_on_NaN=True,
4528            NaN_filler=0,
4529            elevation=None):
4530    """Convert a URS file to an SWW file.
4531    Convert URS C binary format for wave propagation to
4532    sww format native to abstract_2d_finite_volumes.
4533
4534    Specify only basename_in and read files of the form
4535    basefilename-z-mux2, basefilename-e-mux2 and
4536    basefilename-n-mux2 containing relative height,
4537    x-velocity and y-velocity, respectively.
4538
4539    Also convert latitude and longitude to UTM. All coordinates are
4540    assumed to be given in the GDA94 datum. The latitude and longitude
4541    information is for  a grid.
4542
4543    min's and max's: If omitted - full extent is used.
4544    To include a value min may equal it, while max must exceed it.
4545    Lat and lon are assumed to be in decimal degrees.
4546    NOTE: minlon is the eastern-most boundary.
4547
4548    origin is a 3-tuple with geo referenced
4549    UTM coordinates (zone, easting, northing)
4550    It will be the origin of the sww file. This shouldn't be used,
4551    since all of anuga should be able to handle an arbitrary origin.
4552
4553    URS C binary format has data organised as TIME, LONGITUDE, LATITUDE
4554    which means that latitude is the fastest
4555    varying dimension (row major order, so to speak)
4556
4557    In URS C binary the latitudes and longitudes are in ascending order.
4558    """
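    # The conversion is a two-stage pipeline: urs2nc() below rewrites the
    # three mux files as four nc files (_ha, _ua, _va, _e), and ferret2sww()
    # then assembles those into the final sww file.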
4559
4560    if basename_out == None:
4561        basename_out = basename_in
4562
4563    files_out = urs2nc(basename_in, basename_out)
4564
4565    ferret2sww(basename_out,
4566               minlat=minlat,
4567               maxlat=maxlat,
4568               minlon=minlon,
4569               maxlon=maxlon,
4570               mint=mint,
4571               maxt=maxt,
4572               mean_stage=mean_stage,
4573               origin=origin,
4574               zscale=zscale,
4575               fail_on_NaN=fail_on_NaN,
4576               NaN_filler=NaN_filler,
4577               inverted_bathymetry=True,
4578               verbose=verbose)
4579   
4580    if remove_nc_files:
4581        for file_out in files_out:
4582            os.remove(file_out)
4583
4584
4585##
4586# @brief Convert 3 URS files back to 4 NC files.
4587# @param basename_in Stem of the input filenames.
4588    # @param basename_out Stem of the output filenames.
4589# @note The name of the urs file names must be:
4590#          [basename_in]-z-mux
4591#          [basename_in]-e-mux
4592#          [basename_in]-n-mux
4593def urs2nc(basename_in='o', basename_out='urs'):
4594    """Convert the 3 urs files to 4 nc files.
4595
4596    The name of the urs file names must be;
4597    [basename_in]-z-mux
4598    [basename_in]-e-mux
4599    [basename_in]-n-mux
4600    """
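    # For example (hypothetical basenames):
    #
    #     files_out = urs2nc(basename_in='o', basename_out='urs')
    #
    # reads o-z-mux, o-e-mux and o-n-mux and returns
    # ['urs_ha.nc', 'urs_ua.nc', 'urs_va.nc', 'urs_e.nc'].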
4601
4602    files_in = [basename_in + WAVEHEIGHT_MUX_LABEL,
4603                basename_in + EAST_VELOCITY_LABEL,
4604                basename_in + NORTH_VELOCITY_LABEL]
4605    files_out = [basename_out + '_ha.nc',
4606                 basename_out + '_ua.nc',
4607                 basename_out + '_va.nc']
4608    quantities = ['HA', 'UA', 'VA']
4609
4610    #if os.access(files_in[0]+'.mux', os.F_OK) == 0 :
4611    for i, file_name in enumerate(files_in):
4612        if os.access(file_name, os.F_OK) == 0:
4613            if os.access(file_name + '.mux', os.F_OK) == 0:
4614                msg = 'File %s does not exist or is not accessible' % file_name
4615                raise IOError, msg
4616            else:
4617                files_in[i] += '.mux'
4618                print "file_name", file_name
4619
4620    hashed_elevation = None
4621    for file_in, file_out, quantity in map(None, files_in,
4622                                           files_out,
4623                                           quantities):
4624        lonlatdep, lon, lat, depth = _binary_c2nc(file_in,
4625                                                  file_out,
4626                                                  quantity)
4627        if hashed_elevation == None:
4628            elevation_file = basename_out + '_e.nc'
4629            write_elevation_nc(elevation_file,
4630                               lon,
4631                               lat,
4632                               depth)
4633            hashed_elevation = myhash(lonlatdep)
4634        else:
4635            msg = "The elevation information in the mux files is inconsistent"
4636            assert hashed_elevation == myhash(lonlatdep), msg
4637
4638    files_out.append(elevation_file)
4639
4640    return files_out
4641
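# A minimal usage sketch for urs2nc (the stem 'project' is hypothetical;
# project-z-mux, project-e-mux and project-n-mux must exist on disk):
#
#     nc_files = urs2nc(basename_in='project', basename_out='project')
#     # nc_files == ['project_ha.nc', 'project_ua.nc',
#     #              'project_va.nc', 'project_e.nc']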
4642
4643##
4644# @brief Convert a quantity URS file to a NetCDF file.
4645# @param file_in Path to input URS file.
4646# @param file_out Path to the output file.
4647# @param quantity Name of the quantity to be written to the output file.
4648# @return A tuple (lonlatdep, lon, lat, depth).
4649def _binary_c2nc(file_in, file_out, quantity):
4650    """Reads in a quantity urs file and writes a quantity nc file.
4651    Additionally, returns the depth and lat, long info,
4652    so it can be written to a file.
4653    """
4654
    columns = 3                           # long, lat, depth
4656    mux_file = open(file_in, 'rb')
4657
4658    # Number of points/stations
4659    (points_num,) = unpack('i', mux_file.read(4))
4660
4661    # nt, int - Number of time steps
4662    (time_step_count,) = unpack('i', mux_file.read(4))
4663
4664    #dt, float - time step, seconds
4665    (time_step,) = unpack('f', mux_file.read(4))
4666
4667    msg = "Bad data in the mux file."
4668    if points_num < 0:
4669        mux_file.close()
4670        raise ANUGAError, msg
4671    if time_step_count < 0:
4672        mux_file.close()
4673        raise ANUGAError, msg
4674    if time_step < 0:
4675        mux_file.close()
4676        raise ANUGAError, msg
4677
4678    lonlatdep = p_array.array('f')
4679    lonlatdep.read(mux_file, columns * points_num)
4680    lonlatdep = num.array(lonlatdep, dtype=num.float)
4681    lonlatdep = num.reshape(lonlatdep, (points_num, columns))
4682
4683    lon, lat, depth = lon_lat2grid(lonlatdep)
4684    lon_sorted = list(lon)
4685    lon_sorted.sort()
4686
4687    if not num.alltrue(lon == lon_sorted):
4688        msg = "Longitudes in mux file are not in ascending order"
4689        raise IOError, msg
4690
    lat_sorted = list(lat)
    lat_sorted.sort()

    if not num.alltrue(lat == lat_sorted):
        msg = "Latitudes in mux file are not in ascending order"
        raise IOError, msg

4694    nc_file = Write_nc(quantity,
4695                       file_out,
4696                       time_step_count,
4697                       time_step,
4698                       lon,
4699                       lat)
4700
4701    for i in range(time_step_count):
4702        #Read in a time slice from mux file
4703        hz_p_array = p_array.array('f')
4704        hz_p_array.read(mux_file, points_num)
4705        hz_p = num.array(hz_p_array, dtype=num.float)
4706        hz_p = num.reshape(hz_p, (len(lon), len(lat)))
4707        hz_p = num.transpose(hz_p)  # mux has lat varying fastest, nc has long v.f.
4708
4709        #write time slice to nc file
4710        nc_file.store_timestep(hz_p)
4711
4712    mux_file.close()
4713    nc_file.close()
4714
4715    return lonlatdep, lon, lat, depth
4716
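# For reference, the mux binary layout implied by the reads above
# (native-endian C types, as consumed by unpack and p_array):
#
#     int32    points_num                   number of points/stations
#     int32    time_step_count              nt, number of time steps
#     float32  time_step                    dt, in seconds
#     float32  lonlatdep[points_num * 3]    (lon, lat, depth) per point
#     float32  slice[points_num]            one block per time step, with
#                                           latitude varying fastest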
4717
4718##
4719# @brief Write an NC elevation file.
4720# @param file_out Path to the output file.
# @param lon Iterable of longitudes to write.
# @param lat Iterable of latitudes to write.
4723# @param depth_vector The elevation data to write.
4724def write_elevation_nc(file_out, lon, lat, depth_vector):
4725    """Write an nc elevation file."""
4726
4727    # NetCDF file definition
4728    outfile = NetCDFFile(file_out, netcdf_mode_w)
4729
4730    #Create new file
4731    nc_lon_lat_header(outfile, lon, lat)
4732
4733    # ELEVATION
4734    zname = 'ELEVATION'
4735    outfile.createVariable(zname, precision, (lat_name, lon_name))
4736    outfile.variables[zname].units = 'CENTIMETERS'
4737    outfile.variables[zname].missing_value = -1.e+034
4738
4739    outfile.variables[lon_name][:] = ensure_numeric(lon)
4740    outfile.variables[lat_name][:] = ensure_numeric(lat)
4741
4742    depth = num.reshape(depth_vector, (len(lat), len(lon)))
4743    outfile.variables[zname][:] = depth
4744
4745    outfile.close()
4746
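# A minimal sketch of writing a 2 x 3 elevation grid with the function above
# (values and filename are made up; depth_vector is reshaped to
# (len(lat), len(lon)) internally):
#
#     write_elevation_nc('elevation.nc',
#                        lon=[150.0, 150.5, 151.0],
#                        lat=[-35.0, -34.5],
#                        depth_vector=[10., 11., 12., 13., 14., 15.])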
4747
4748##
4749# @brief Write lat/lon headers to a NetCDF file.
4750# @param outfile Handle to open file to write to.
4751# @param lon An iterable of the longitudes.
4752# @param lat An iterable of the latitudes.
4753# @note Defines lat/long dimensions and variables. Sets various attributes:
4754#          .point_spacing  and  .units
4755#       and writes lat/lon data.
4756
4757def nc_lon_lat_header(outfile, lon, lat):
4758    """Write lat/lon headers to a NetCDF file.
4759
4760    outfile is the netcdf file handle.
4761    lon - a list/array of the longitudes
4762    lat - a list/array of the latitudes
4763    """
4764
4765    outfile.institution = 'Geoscience Australia'
4766    outfile.description = 'Converted from URS binary C'
4767
4768    # Longitude
4769    outfile.createDimension(lon_name, len(lon))
4770    outfile.createVariable(lon_name, precision, (lon_name,))
4771    outfile.variables[lon_name].point_spacing = 'uneven'
4772    outfile.variables[lon_name].units = 'degrees_east'
4773    outfile.variables[lon_name].assignValue(lon)
4774
4775    # Latitude
4776    outfile.createDimension(lat_name, len(lat))
4777    outfile.createVariable(lat_name, precision, (lat_name,))
4778    outfile.variables[lat_name].point_spacing = 'uneven'
4779    outfile.variables[lat_name].units = 'degrees_north'
4780    outfile.variables[lat_name].assignValue(lat)
4781
4782
4783##
# @brief Convert gridded point data to lon/lat vectors and a quantity vector.
# @param long_lat_dep An array where each row is (longitude, latitude, value).
4786# @return A tuple (long, lat, quantity).
4787# @note The latitude is the fastest varying dimension - in mux files.
4788def lon_lat2grid(long_lat_dep):
4789    """
    given a list of points that are assumed to be on a grid,
    return the longs and lats of the grid.
4792    long_lat_dep is an array where each row is a position.
4793    The first column is longitudes.
4794    The second column is latitudes.
4795
4796    The latitude is the fastest varying dimension - in mux files
4797    """
4798
4799    LONG = 0
4800    LAT = 1
4801    QUANTITY = 2
4802
4803    long_lat_dep = ensure_numeric(long_lat_dep, num.float)
4804
4805    num_points = long_lat_dep.shape[0]
4806    this_rows_long = long_lat_dep[0,LONG]
4807
4808    # Count the length of unique latitudes
4809    i = 0
    while i < num_points and long_lat_dep[i,LONG] == this_rows_long:
4811        i += 1
4812
    # determine the lats and longs from the grid
4814    lat = long_lat_dep[:i, LAT]
4815    long = long_lat_dep[::i, LONG]
4816
4817    lenlong = len(long)
4818    lenlat = len(lat)
4819
4820    msg = 'Input data is not gridded'
4821    assert num_points % lenlat == 0, msg
4822    assert num_points % lenlong == 0, msg
4823
4824    # Test that data is gridded
4825    for i in range(lenlong):
4826        msg = 'Data is not gridded.  It must be for this operation'
4827        first = i * lenlat
4828        last = first + lenlat
4829
4830        assert num.allclose(long_lat_dep[first:last,LAT], lat), msg
4831        assert num.allclose(long_lat_dep[first:last,LONG], long[i]), msg
4832
4833    msg = 'Out of range latitudes/longitudes'
    for l in lat:
        assert -90 < l < 90, msg
    for l in long:
        assert -180 < l < 180, msg
4836
4837    # Changing quantity from lat being the fastest varying dimension to
4838    # long being the fastest varying dimension
4839    # FIXME - make this faster/do this a better way
4840    # use numeric transpose, after reshaping the quantity vector
4841    quantity = num.zeros(num_points, num.float)
4842
4843    for lat_i, _ in enumerate(lat):
4844        for long_i, _ in enumerate(long):
4845            q_index = lat_i*lenlong + long_i
4846            lld_index = long_i*lenlat + lat_i
4847            temp = long_lat_dep[lld_index, QUANTITY]
4848            quantity[q_index] = temp
4849
4850    return long, lat, quantity
4851
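# The double loop above could be replaced by the reshape/transpose approach
# mentioned in the FIXME.  A sketch (index-equivalent, since
# lld_index = long_i*lenlat + lat_i and q_index = lat_i*lenlong + long_i):
#
#     quantity = num.transpose(num.reshape(long_lat_dep[:, QUANTITY],
#                                          (lenlong, lenlat))).ravel()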
4852################################################################################
4853# END URS 2 SWW
4854################################################################################
4855
4856################################################################################
4857# URS UNGRIDDED 2 SWW
4858################################################################################
4859
4860### PRODUCING THE POINTS NEEDED FILE ###
4861
4862# Ones used for FESA 2007 results
4863#LL_LAT = -50.0
4864#LL_LONG = 80.0
4865#GRID_SPACING = 1.0/60.0
4866#LAT_AMOUNT = 4800
4867#LONG_AMOUNT = 3600
4868
4869
4870##
# @brief Write a .urs file of the URS grid points needed for a boundary polygon.
4872# @param file_name
4873# @param boundary_polygon
4874# @param zone
4875# @param ll_lat
4876# @param ll_long
4877# @param grid_spacing
4878# @param lat_amount
4879# @param long_amount
4880# @param isSouthernHemisphere
4881# @param export_csv
4882# @param use_cache
4883# @param verbose True if this function is to be verbose.
# @return The Geospatial_data object that was written.
4885def URS_points_needed_to_file(file_name, boundary_polygon, zone,
4886                              ll_lat, ll_long,
4887                              grid_spacing,
4888                              lat_amount, long_amount,
4889                              isSouthernHemisphere=True,
4890                              export_csv=False, use_cache=False,
4891                              verbose=False):
4892    """
4893    Given the info to replicate the URS grid and a polygon output
4894    a file that specifies the cloud of boundary points for URS.
4895
    This creates a .urs file.  This is in the format used by URS;
    the 1st line is the number of points,
    each line after represents a point, in lats and longs.

    Note: The polygon cannot cross zones or hemispheres.

    A workaround for different zones or hemispheres is to run this twice,
    once for each zone, and then combine the output.

    file_name - name of the urs file produced for David.
    boundary_polygon - a list of points that describes a polygon.
                      The last point is assumed to join the first point.
                      This is in UTM (lat long would be better though)

    This is info about the URS model that needs to be input.

    ll_lat - lower left latitude, in decimal degrees
    ll_long - lower left longitude, in decimal degrees
    grid_spacing - in decimal degrees
    lat_amount - number of latitudes
    long_amount - number of longitudes

    Don't add the file extension.  It will be added.
4919    """
4920
4921    geo = URS_points_needed(boundary_polygon, zone, ll_lat, ll_long,
4922                            grid_spacing,
4923                            lat_amount, long_amount, isSouthernHemisphere,
4924                            use_cache, verbose)
4925
4926    if not file_name[-4:] == ".urs":
4927        file_name += ".urs"
4928
4929    geo.export_points_file(file_name, isSouthHemisphere=isSouthernHemisphere)
4930
4931    if export_csv:
4932        if file_name[-4:] == ".urs":
4933            file_name = file_name[:-4] + ".csv"
4934        geo.export_points_file(file_name)
4935
4936    return geo
4937
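# A usage sketch with the FESA 2007 style grid parameters commented above
# (the boundary polygon coordinates and zone are hypothetical UTM values):
#
#     polygon = [[404000, 6130000], [408000, 6130000], [408000, 6134000]]
#     geo = URS_points_needed_to_file('boundary_points', polygon, zone=44,
#                                     ll_lat=-50.0, ll_long=80.0,
#                                     grid_spacing=1.0/60.0,
#                                     lat_amount=4800, long_amount=3600,
#                                     export_csv=True, verbose=True)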
4938
4939##
# @brief Get the URS grid points needed to cover a boundary polygon.
4941# @param boundary_polygon
4942# @param zone
4943# @param ll_lat
4944# @param ll_long
4945# @param grid_spacing
4946# @param lat_amount
4947# @param long_amount
4948# @param isSouthHemisphere
4949# @param use_cache
4950# @param verbose
4951def URS_points_needed(boundary_polygon, zone, ll_lat,
4952                      ll_long, grid_spacing,
4953                      lat_amount, long_amount, isSouthHemisphere=True,
4954                      use_cache=False, verbose=False):
4955    args = (boundary_polygon,
4956            zone, ll_lat,
4957            ll_long, grid_spacing,
4958            lat_amount, long_amount, isSouthHemisphere)
4959    kwargs = {}
4960
4961    if use_cache is True:
4962        try:
4963            from anuga.caching import cache
4964        except:
            msg = 'Caching was requested, but caching module ' \
                  'could not be imported'
            raise ImportError, msg
4968
4969        geo = cache(_URS_points_needed,
4970                    args, kwargs,
4971                    verbose=verbose,
4972                    compression=False)
4973    else:
        geo = _URS_points_needed(*args, **kwargs)
4975
4976    return geo
4977
4978
4979##
# @brief Worker for URS_points_needed; see that function for details.
4981# @param boundary_polygon An iterable of points that describe a polygon.
4982# @param zone
4983# @param ll_lat Lower left latitude, in decimal degrees
4984# @param ll_long Lower left longitude, in decimal degrees
4985# @param grid_spacing Grid spacing in decimal degrees.
4986# @param lat_amount
4987# @param long_amount
4988# @param isSouthHemisphere
4989def _URS_points_needed(boundary_polygon,
4990                       zone, ll_lat,
4991                       ll_long, grid_spacing,
4992                       lat_amount, long_amount,
4993                       isSouthHemisphere):
4994    """
    boundary_polygon - a list of points that describes a polygon.
                      The last point is assumed to join the first point.
                      This is in UTM (lat long would be better though)

    ll_lat - lower left latitude, in decimal degrees
    ll_long - lower left longitude, in decimal degrees
    grid_spacing - in decimal degrees
5002
5003    """
5004
5005    from sets import ImmutableSet
5006
    msg = "grid_spacing cannot be zero"
    assert grid_spacing != 0, msg
5009
5010    a = boundary_polygon
5011
5012    # List of segments.  Each segment is two points.
    segs = [[a[i-1], a[i]] for i in range(len(a))]
5014
5015    # convert the segs to Lat's and longs.
5016    # Don't assume the zone of the segments is the same as the lower left
5017    # corner of the lat long data!!  They can easily be in different zones
5018    lat_long_set = ImmutableSet()
5019    for seg in segs:
5020        points_lat_long = points_needed(seg, ll_lat, ll_long, grid_spacing,
5021                                        lat_amount, long_amount, zone,
5022                                        isSouthHemisphere)
5023        lat_long_set |= ImmutableSet(points_lat_long)
5024
5025    if lat_long_set == ImmutableSet([]):
5026        msg = "URS region specified and polygon does not overlap."
5027        raise ValueError, msg
5028
5029    # Warning there is no info in geospatial saying the hemisphere of
5030    # these points.  There should be.
5031    geo = Geospatial_data(data_points=list(lat_long_set),
5032                          points_are_lats_longs=True)
5033
5034    return geo
5035
5036
5037##
# @brief Get the points needed to interpolate any point on a segment.
5039# @param seg Two points in the UTM.
5040# @param ll_lat Lower left latitude, in decimal degrees
5041# @param ll_long Lower left longitude, in decimal degrees
5042# @param grid_spacing
5043# @param lat_amount
5044# @param long_amount
5045# @param zone
5046# @param isSouthHemisphere
5047# @return A list of points.
5048def points_needed(seg, ll_lat, ll_long, grid_spacing,
5049                  lat_amount, long_amount, zone,
5050                  isSouthHemisphere):
5051    """
5052    seg is two points, in UTM
5053    return a list of the points, in lats and longs that are needed to
5054    interpolate any point on the segment.
5055    """
5056
5057    from math import sqrt
5058
5059    geo_reference = Geo_reference(zone=zone)
5060    geo = Geospatial_data(seg, geo_reference=geo_reference)
5061    seg_lat_long = geo.get_data_points(as_lat_long=True,
5062                                       isSouthHemisphere=isSouthHemisphere)
5063
5064    # 1.415 = 2^0.5, rounded up....
5065    sqrt_2_rounded_up = 1.415
5066    buffer = sqrt_2_rounded_up * grid_spacing
5067
5068    max_lat = max(seg_lat_long[0][0], seg_lat_long[1][0]) + buffer
5069    max_long = max(seg_lat_long[0][1], seg_lat_long[1][1]) + buffer
5070    min_lat = min(seg_lat_long[0][0], seg_lat_long[1][0]) - buffer
5071    min_long = min(seg_lat_long[0][1], seg_lat_long[1][1]) - buffer
5072
5073    first_row = (min_long - ll_long) / grid_spacing
5074
5075    # To round up
5076    first_row_long = int(round(first_row + 0.5))
5077
5078    last_row = (max_long - ll_long) / grid_spacing # round down
5079    last_row_long = int(round(last_row))
5080
5081    first_row = (min_lat - ll_lat) / grid_spacing
5082    # To round up
5083    first_row_lat = int(round(first_row + 0.5))
5084
5085    last_row = (max_lat - ll_lat) / grid_spacing # round down
5086    last_row_lat = int(round(last_row))
5087
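    # 157147.4112 is presumably sqrt(2) * 111120 m (one degree of latitude
    # ~ 60 nautical miles = 111120 m), i.e. the diagonal of a one-degree
    # cell in metres; scaled by grid_spacing it becomes the keep_point cutoff.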
5088    max_distance = 157147.4112 * grid_spacing
5089    points_lat_long = []
5090
5091    # Create a list of the lat long points to include.
5092    for index_lat in range(first_row_lat, last_row_lat + 1):
5093        for index_long in range(first_row_long, last_row_long + 1):
5094            lat = ll_lat + index_lat*grid_spacing
5095            long = ll_long + index_long*grid_spacing
5096
5097            #filter here to keep good points
5098            if keep_point(lat, long, seg, max_distance):
5099                points_lat_long.append((lat, long)) #must be hashable
5100
    # Points too far from the segment were already filtered out by keep_point.
5102    return points_lat_long
5103
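# Index arithmetic check for the rounding above: with ll_long = 80.0 and
# grid_spacing = 1.0/60, a buffered min_long of 80.025 gives
# first_row = 1.5 and first_row_long = int(round(1.5 + 0.5)) = 2, the first
# whole grid column inside the box.  Note that int(round(x)) rounds to
# nearest rather than strictly down, so the '# round down' comments hold
# exactly only when the fractional part is below 0.5.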
5104
5105##
# @brief Decide whether a lat/long point lies within max_distance of seg.
5107# @param lat
5108# @param long
5109# @param seg Two points in UTM.
5110# @param max_distance
5111def keep_point(lat, long, seg, max_distance):
5112    """
5113    seg is two points, UTM
5114    """
5115
5116    from math import sqrt
5117
5118    _ , x0, y0 = redfearn(lat, long)
5119    x1 = seg[0][0]
5120    y1 = seg[0][1]
5121    x2 = seg[1][0]
5122    y2 = seg[1][1]
5123    x2_1 = x2-x1
5124    y2_1 = y2-y1
5125    try:
5126        d = abs((x2_1)*(y1-y0)-(x1-x0)*(y2_1))/sqrt( \
5127            (x2_1)*(x2_1)+(y2_1)*(y2_1))
5128    except ZeroDivisionError:
5129        if sqrt((x2_1)*(x2_1)+(y2_1)*(y2_1)) == 0 \
5130           and abs((x2_1)*(y1-y0)-(x1-x0)*(y2_1)) == 0:
5131            return True
5132        else:
5133            return False
5134
5135    return d <= max_distance
5136
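# Note: d above is the perpendicular distance from the point to the
# *infinite line* through seg, not to the finite segment.  A quick check of
# the formula with easy numbers: seg = ((0, 0), (10, 0)) and a projected
# point (x0, y0) = (5, 3) gives
#
#     d = |10*(0 - 3) - (0 - 5)*0| / sqrt(10**2 + 0**2) = 30/10 = 3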
5137################################################################################
5138# CONVERTING UNGRIDDED URS DATA TO AN SWW FILE
5139################################################################################
5140
5141WAVEHEIGHT_MUX_LABEL = '-z-mux'
5142EAST_VELOCITY_LABEL =  '-e-mux'
5143NORTH_VELOCITY_LABEL =  '-n-mux'
5144
5145##
5146# @brief Convert URS file(s) (wave prop) to an SWW file.
5147# @param basename_in Stem of the input filenames.
5148# @param basename_out Path to the output SWW file.
5149# @param verbose True if this function is to be verbose.
5150# @param mint
5151# @param maxt
5152# @param mean_stage
5153# @param origin Tuple with geo-ref UTM coordinates (zone, easting, northing).
5154# @param hole_points_UTM
5155# @param zscale
5156# @note Also convert latitude and longitude to UTM. All coordinates are
5157#       assumed to be given in the GDA94 datum.
5158# @note Input filename stem has suffixes '-z-mux', '-e-mux' and '-n-mux'
5159#       added for relative height, x-velocity and y-velocity respectively.
5160def urs_ungridded2sww(basename_in='o', basename_out=None, verbose=False,
5161                      mint=None, maxt=None,
5162                      mean_stage=0,
5163                      origin=None,
5164                      hole_points_UTM=None,
5165                      zscale=1):
5166    """
5167    Convert URS C binary format for wave propagation to
5168    sww format native to abstract_2d_finite_volumes.
5169
5170    Specify only basename_in and read files of the form
5171    basefilename-z-mux, basefilename-e-mux and
5172    basefilename-n-mux containing relative height,
5173    x-velocity and y-velocity, respectively.
5174
5175    Also convert latitude and longitude to UTM. All coordinates are
    assumed to be given in the GDA94 datum. The latitude and longitude
    information is assumed to be ungridded.
5178
    min's and max's: If omitted, the full extent is used.
    To include a value, min and max may equal it.
5181    Lat and lon are assumed to be in decimal degrees.
5182
5183    origin is a 3-tuple with geo referenced
5184    UTM coordinates (zone, easting, northing)
5185    It will be the origin of the sww file. This shouldn't be used,
    since all of anuga should be able to handle an arbitrary origin.
5187    The mux point info is NOT relative to this origin.
5188
5189    URS C binary format has data organised as TIME, LONGITUDE, LATITUDE
5190    which means that latitude is the fastest
5191    varying dimension (row major order, so to speak)
5192
    In URS C binary the latitudes and longitudes are in ascending order.
5194
5195    Note, interpolations of the resulting sww file will be different
5196    from results of urs2sww.  This is due to the interpolation
5197    function used, and the different grid structure between urs2sww
5198    and this function.
5199
5200    Interpolating data that has an underlying gridded source can
5201    easily end up with different values, depending on the underlying
5202    mesh.
5203
5204    consider these 4 points
5205    50  -50
5206
5207    0     0
5208
5209    The grid can be
5210     -
5211    |\|   A
5212     -
5213     or;
5214      -
5215     |/|  B
5216      -
5217
    If a point is just below the centre of the square, it will have a
5219    +ve value in grid A and a -ve value in grid B.
5220    """
5221
5222    from anuga.mesh_engine.mesh_engine import NoTrianglesError
5223    from anuga.pmesh.mesh import Mesh
5224
5225    files_in = [basename_in + WAVEHEIGHT_MUX_LABEL,
5226                basename_in + EAST_VELOCITY_LABEL,
5227                basename_in + NORTH_VELOCITY_LABEL]
5228    quantities = ['HA','UA','VA']
5229
5230    # instantiate urs_points of the three mux files.
5231    mux = {}
5232    for quantity, file in map(None, quantities, files_in):
5233        mux[quantity] = Urs_points(file)
5234
5235    # Could check that the depth is the same. (hashing)
5236
5237    # handle to a mux file to do depth stuff
5238    a_mux = mux[quantities[0]]
5239
5240    # Convert to utm
5241    lat = a_mux.lonlatdep[:,1]
5242    long = a_mux.lonlatdep[:,0]
5243    points_utm, zone = convert_from_latlon_to_utm(latitudes=lat,
5244                                                  longitudes=long)
5245
5246    elevation = a_mux.lonlatdep[:,2] * -1
5247
5248    # grid (create a mesh from the selected points)
    # This mesh has a problem.  Triangles are stretched over ungridded areas.
5250    # If these areas could be described as holes in pmesh, that would be great.
5251
    # I can't just get the user to select a point in the middle.
5253    # A boundary is needed around these points.
5254    # But if the zone of points is obvious enough auto-segment should do
5255    # a good boundary.
5256    mesh = Mesh()
5257    mesh.add_vertices(points_utm)
5258    mesh.auto_segment(smooth_indents=True, expand_pinch=True)
5259
5260    # To try and avoid alpha shape 'hugging' too much
5261    mesh.auto_segment(mesh.shape.get_alpha() * 1.1)
5262    if hole_points_UTM is not None:
5263        point = ensure_absolute(hole_points_UTM)
5264        mesh.add_hole(point[0], point[1])
5265
5266    try:
5267        mesh.generate_mesh(minimum_triangle_angle=0.0, verbose=False)
5268    except NoTrianglesError:
5269        # This is a bit of a hack, going in and changing the data structure.
5270        mesh.holes = []
5271        mesh.generate_mesh(minimum_triangle_angle=0.0, verbose=False)
5272
5273    mesh_dic = mesh.Mesh2MeshList()
5274
5275    #mesh.export_mesh_file(basename_in + '_168.tsh')
5276    #import sys; sys.exit()
5277    # These are the times of the mux file
5278    mux_times = []
5279    for i in range(a_mux.time_step_count):
5280        mux_times.append(a_mux.time_step * i)
5281    (mux_times_start_i, mux_times_fin_i) = mux2sww_time(mux_times, mint, maxt)
5282    times = mux_times[mux_times_start_i:mux_times_fin_i]
5283
5284    if mux_times_start_i == mux_times_fin_i:
5285        # Close the mux files
5286        for quantity, file in map(None, quantities, files_in):
5287            mux[quantity].close()
5288        msg = "Due to mint and maxt there's no time info in the boundary SWW."
5289        raise Exception, msg
5290
    # If this raise is removed there are currently no downstream errors
5292
    points_utm = ensure_numeric(points_utm)
5294    assert num.alltrue(ensure_numeric(mesh_dic['generatedpointlist'])
5295                       == ensure_numeric(points_utm))
5296
5297    volumes = mesh_dic['generatedtrianglelist']
5298
5299    # write sww intro and grid stuff.
5300    if basename_out is None:
5301        swwname = basename_in + '.sww'
5302    else:
5303        swwname = basename_out + '.sww'
5304
5305    if verbose: print 'Output to ', swwname
5306
5307    outfile = NetCDFFile(swwname, netcdf_mode_w)
5308
5309    # For a different way of doing this, check out tsh2sww
5310    # work out sww_times and the index range this covers
5311    sww = Write_sww()
5312    sww.store_header(outfile, times, len(volumes), len(points_utm),
5313                     verbose=verbose, sww_precision=netcdf_float)
5314    outfile.mean_stage = mean_stage
5315    outfile.zscale = zscale
5316
5317    sww.store_triangulation(outfile, points_utm, volumes,
5318                            elevation, zone,  new_origin=origin,
5319                            verbose=verbose)
5320
5321    if verbose: print 'Converting quantities'
5322
5323    # Read in a time slice from each mux file and write it to the SWW file
5324    j = 0
5325    for ha, ua, va in map(None, mux['HA'], mux['UA'], mux['VA']):
5326        if j >= mux_times_start_i and j < mux_times_fin_i:
5327            stage = zscale*ha + mean_stage
5328            h = stage - elevation
5329            xmomentum = ua*h
5330            ymomentum = -1 * va * h # -1 since in mux files south is positive.
5331            sww.store_quantities(outfile,
5332                                 slice_index=j-mux_times_start_i,
5333                                 verbose=verbose,
5334                                 stage=stage,
5335                                 xmomentum=xmomentum,
5336                                 ymomentum=ymomentum,
5337                                 sww_precision=num.float)
5338        j += 1
5339
5340    if verbose: sww.verbose_quantities(outfile)
5341
5342    outfile.close()
5343
5345
5346
5347################################################################################
5348# READ MUX2 FILES line of points
5349################################################################################
5350
5351WAVEHEIGHT_MUX2_LABEL = '-z-mux2'
5352EAST_VELOCITY_MUX2_LABEL = '-e-mux2'
5353NORTH_VELOCITY_MUX2_LABEL = '-n-mux2'
5354
5355##
# @brief Read a weighted combination of mux2 files.
5357# @param filenames List of mux2 format input filenames.
5358# @param weights Weights associated with each source file.
5359# @param permutation The gauge numbers for which data is extracted.
5360# @param verbose True if this function is to be verbose.
5361# @return (times, latitudes, longitudes, elevation, quantity, starttime)
5362def read_mux2_py(filenames,
5363                 weights=None,
5364                 permutation=None,
5365                 verbose=False):
5366    """Access the mux files specified in the filenames list. Combine the
5367       data found therin as a weighted linear sum as specifed by the weights.
5368       If permutation is None or empty extract timeseries data for all gauges
5369       within the files.
5370
5371       Input:
5372           filenames:   List of filenames specifiying the file containing the
5373                        timeseries data (mux2 format) for each source
5374           weights:     Weighs associated with each source
5375                        (defaults to 1 for each source)
5376           permutation: Specifies the gauge numbers that for which data is to be
5377                        extracted
5378    """
5379
5380    from urs_ext import read_mux2
5381
5382    numSrc = len(filenames)
5383
5384    file_params = -1 * num.ones(3, num.float)                    # [nsta,dt,nt]
5385
5386    # Convert verbose to int C flag
5387    if verbose:
5388        verbose=1
5389    else:
5390        verbose=0
5391
5392    if weights is None:
        weights = num.ones(numSrc, num.int)     # Default to weight 1 per source
5394
5395    if permutation is None:
5396        permutation = ensure_numeric([], num.float)
5397
5398    # Call underlying C implementation urs2sts_ext.c
5399    data = read_mux2(numSrc, filenames, weights, file_params,
5400                     permutation, verbose)
5401
5402    msg = 'File parameter values were not read in correctly from c file'
5403    assert len(num.compress(file_params > 0, file_params)) != 0, msg
5404
    msg = 'The number of stations specified in the c array and in the file ' \
5406          'are inconsistent'
5407    assert file_params[0] >= len(permutation), msg
5408
5409    msg = 'The number of stations returned is inconsistent with ' \
5410          'the requested number'
5411    assert len(permutation) == 0 or len(permutation) == data.shape[0], msg
5412
5413    nsta = int(file_params[0])
5414    msg = 'Must have at least one station'
5415    assert nsta > 0, msg
5416
5417    dt = file_params[1]
    msg = 'Must have a positive timestep'
5419    assert dt > 0, msg
5420
5421    nt = int(file_params[2])
5422    msg = 'Must have at least one gauge value'
5423    assert nt > 0, msg
5424
5425    OFFSET = 5 # Number of site parameters p passed back with data
5426               # p = [geolat,geolon,depth,start_tstep,finish_tstep]
5427
5428    # FIXME (Ole): What is the relationship with params and data.shape ?
5429    # It looks as if the following asserts should pass but they don't always
5430    #
5431    #msg = 'nt = %d, data.shape[1] == %d' %(nt, data.shape[1])
5432    #assert nt == data.shape[1] - OFFSET, msg
5433    #
5434    #msg = 'nsta = %d, data.shape[0] == %d' %(nsta, data.shape[0])
5435    #assert nsta == data.shape[0], msg
5436
5437    # Number of stations in ordering file
5438    number_of_selected_stations = data.shape[0]
5439
5440    # Index where data ends and parameters begin
5441    parameters_index = data.shape[1] - OFFSET
5442
5443    times = dt * num.arange(parameters_index)
5444    latitudes = num.zeros(number_of_selected_stations, num.float)
5445    longitudes = num.zeros(number_of_selected_stations, num.float)
5446    elevation = num.zeros(number_of_selected_stations, num.float)
5447    quantity = num.zeros((number_of_selected_stations, parameters_index), num.float)
5448
5449    starttime = 1e16
5450    for i in range(number_of_selected_stations):
5451        quantity[i][:] = data[i][:parameters_index]
5452        latitudes[i] = data[i][parameters_index]
5453        longitudes[i] = data[i][parameters_index+1]
5454        elevation[i] = -data[i][parameters_index+2]
5455        first_time_step = data[i][parameters_index+3]
5456        starttime = min(dt*first_time_step, starttime)
5457
5458    return times, latitudes, longitudes, elevation, quantity, starttime
5459
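# Layout of each row of the data array unpacked above (OFFSET = 5 site
# parameters follow the timeseries):
#
#     data[i] = [q_0, ..., q_{nt-1},        # timeseries for gauge i
#                geolat, geolon, depth,     # site position and depth
#                start_tstep, finish_tstep] # first/last timestep with data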
5460
5461##
# @brief Get the indices into mux_times bounded by mint and maxt.
# @param mux_times Sequence of times found in the mux file.
# @param mint Minimum time wanted (None means start from the first time).
# @param maxt Maximum time wanted (None means finish at the last time).
# @return Tuple (start_index, finish_index) into mux_times.
5467def mux2sww_time(mux_times, mint, maxt):
5468    """
5469    """
5470
    if mint is None:
5472        mux_times_start_i = 0
5473    else:
5474        mux_times_start_i = num.searchsorted(mux_times, mint)
5475
    if maxt is None:
5477        mux_times_fin_i = len(mux_times)
5478    else:
5479        maxt += 0.5 # so if you specify a time where there is
5480                    # data that time will be included
5481        mux_times_fin_i = num.searchsorted(mux_times, maxt)
5482
5483    return mux_times_start_i, mux_times_fin_i
5484
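# A worked example: with mux_times = [0.0, 2.0, 4.0, 6.0], mint = 1 and
# maxt = 5, searchsorted gives a start index of 1 and (after maxt += 0.5)
# a finish index of 3, so the retained slice is mux_times[1:3] == [2.0, 4.0].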
5485
5486##
5487# @brief Convert a URS (mux2, wave propagation) file to an STS file.
5488# @param basename_in String (or list) of source file stems.
5489# @param basename_out Stem of output STS file path.
5490# @param weights
5491# @param verbose True if this function is to be verbose.
5492# @param origin Tuple with with geo-ref UTM coords (zone, easting, northing).
5493# @param zone
5494# @param mean_stage
5495# @param zscale
5496# @param ordering_filename Path of a file specifying which mux2 gauge points are
5497#                          to be stored.
5498# @note Also convert latitude and longitude to UTM. All coordinates are
5499#       assumed to be given in the GDA94 datum.
5500def urs2sts(basename_in, basename_out=None,
5501            weights=None,
5502            verbose=False,
5503            origin=None,
5504            zone=None,
5505            central_meridian=None,           
5506            mean_stage=0.0,
5507            zscale=1.0,
5508            ordering_filename=None):
5509    """Convert URS mux2 format for wave propagation to sts format
5510
5511    Also convert latitude and longitude to UTM. All coordinates are
5512    assumed to be given in the GDA94 datum
5513
5514    origin is a 3-tuple with geo referenced
5515    UTM coordinates (zone, easting, northing)
5516
5517    inputs:
5518
5519    basename_in: list of source file prefixes
5520
5521        These are combined with the extensions:
5522        WAVEHEIGHT_MUX2_LABEL = '-z-mux2' for stage
5523        EAST_VELOCITY_MUX2_LABEL = '-e-mux2' xmomentum
5524        NORTH_VELOCITY_MUX2_LABEL = '-n-mux2' and ymomentum
5525
        to create a 2D list of mux2 files. The rows are associated with each
        quantity and must have the above extensions;
        the columns are the list of file prefixes.
5529
    ordering_filename: a .txt file name specifying which mux2 gauge points are
              to be stored. This is indicated by the index of the gauge
              in the ordering file.
5533
5534              ordering file format:
5535              1st line:    'index,longitude,latitude\n'
5536              other lines: index,longitude,latitude
5537
              If ordering_filename is None or the ordering file is empty
              then all points are taken in the order they
              appear in the mux2 file.
5541
5542
5543    output:
5544      basename_out: name of sts file in which mux2 data is stored.
5545     
5546     
5547     
    NOTE: South is positive in mux files so the sign of the y-component of
    velocity is reversed.
5549    """
5550
5551    import os
5552    from Scientific.IO.NetCDF import NetCDFFile
    from types import ListType, StringType
5555
5556    if not isinstance(basename_in, ListType):
5557        if verbose: print 'Reading single source'
5558        basename_in = [basename_in]
5559
5560    # This is the value used in the mux file format to indicate NAN data
5561    # FIXME (Ole): This should be changed everywhere to IEEE NAN when
5562    #              we upgrade to Numpy
5563    NODATA = 99
5564
5565    # Check that basename is a list of strings
    if not all(isinstance(z, StringType) for z in basename_in):
        msg = 'basename_in must be a string or list of strings'
        raise Exception, msg
5569
5570    # Find the number of sources to be used
5571    numSrc = len(basename_in)
5572
5573    # A weight must be specified for each source
5574    if weights is None:
5575        # Default is equal weighting
5576        weights = num.ones(numSrc, num.float) / numSrc
5577    else:
5578        weights = ensure_numeric(weights)
5579        msg = 'When combining multiple sources must specify a weight for ' \
5580              'mux2 source file'
5581        assert len(weights) == numSrc, msg
5582
5583    if verbose: print 'Weights used in urs2sts:', weights
5584
5585    # Check output filename
5586    if basename_out is None:
5587        msg = 'STS filename must be specified as basename_out ' \
5588              'in function urs2sts'
5589        raise Exception, msg
5590
5591    if basename_out.endswith('.sts'):
5592        stsname = basename_out
5593    else:
5594        stsname = basename_out + '.sts'
5595
5596    # Create input filenames from basenames and check their existence
5597    files_in = [[], [], []]
5598    for files in basename_in:
5599        files_in[0].append(files + WAVEHEIGHT_MUX2_LABEL),
5600        files_in[1].append(files + EAST_VELOCITY_MUX2_LABEL)
5601        files_in[2].append(files + NORTH_VELOCITY_MUX2_LABEL)
5602
5603    quantities = ['HA','UA','VA'] # Quantity names used in the MUX2 format
5604    for i in range(len(quantities)):
5605        for file_in in files_in[i]:
5606            if (os.access(file_in, os.R_OK) == 0):
5607                msg = 'File %s does not exist or is not accessible' % file_in
5608                raise IOError, msg
5609
5610    # Establish permutation array
5611    if ordering_filename is not None:
5612        if verbose is True: print 'Reading ordering file', ordering_filename
5613
5614        # Read ordering file
5615        try:
5616            fid = open(ordering_filename, 'r')
5617            file_header = fid.readline().split(',')
5618            ordering_lines = fid.readlines()
5619            fid.close()
5620        except:
5621            msg = 'Cannot open %s' % ordering_filename
5622            raise Exception, msg
5623
5624        reference_header = 'index, longitude, latitude\n'
5625        reference_header_split = reference_header.split(',')
5626        for i in range(3):
5627            if not file_header[i].strip() == reference_header_split[i].strip():
5628                msg = 'File must contain header: ' + reference_header
5629                raise Exception, msg
5630
5631        if len(ordering_lines) < 2:
5632            msg = 'File must contain at least two points'
5633            raise Exception, msg
5634
5635        permutation = [int(line.split(',')[0]) for line in ordering_lines]
5636        permutation = ensure_numeric(permutation)
5637    else:
5638        permutation = None
5639
5640    # Read MUX2 files
    if verbose: print 'Reading mux2 files'
5642
    mux = {}
5644    for i, quantity in enumerate(quantities):
        # For each quantity read the associated list of source mux2 files
        # with the extension associated with that quantity
5647
5648        times, latitudes, longitudes, elevation, mux[quantity], starttime \
5649            = read_mux2_py(files_in[i], weights, permutation, verbose=verbose)
5650
5651        # Check that all quantities have consistent time and space information
5652        if quantity != quantities[0]:
5653            msg = '%s, %s and %s have inconsistent gauge data' \
5654                  % (files_in[0], files_in[1], files_in[2])
5655            assert num.allclose(times, times_old), msg
5656            assert num.allclose(latitudes, latitudes_old), msg
5657            assert num.allclose(longitudes, longitudes_old), msg
5658            assert num.allclose(elevation, elevation_old), msg
5659            assert num.allclose(starttime, starttime_old), msg
5660        times_old = times
5661        latitudes_old = latitudes
5662        longitudes_old = longitudes
5663        elevation_old = elevation
5664        starttime_old = starttime
5665
5666        # Self check - can be removed to improve speed
5667        #ref_longitudes = [float(line.split(',')[1]) for line in ordering_lines]
5668        #ref_latitudes = [float(line.split(',')[2]) for line in ordering_lines]
5669        #
5670        #msg = 'Longitudes specified in ordering file do not match those ' \
5671        #      'found in mux files. ' \
5672        #      'I got %s instead of %s (only beginning shown)' \
5673        #      % (str(longitudes[:10]) + '...',
5674        #         str(ref_longitudes[:10]) + '...')
5675        #assert allclose(longitudes, ref_longitudes), msg
5676        #
5677        #msg = 'Latitudes specified in ordering file do not match those ' \
5678        #      'found in mux files. '
5679        #      'I got %s instead of %s (only beginning shown)' \
5680        #      % (str(latitudes[:10]) + '...',
5681        #         str(ref_latitudes[:10]) + '...')
5682        #assert allclose(latitudes, ref_latitudes), msg
5683
5684    # Store timeseries in STS file
    msg = 'File is empty and/or clipped region is not in file region'
    assert len(latitudes) > 0, msg
5687
5688    number_of_points = latitudes.shape[0]      # Number of stations retrieved
5689    number_of_times = times.shape[0]           # Number of timesteps
5690    number_of_latitudes = latitudes.shape[0]   # Number latitudes
5691    number_of_longitudes = longitudes.shape[0] # Number longitudes
5692
    # The permutation vector contains the original indices
5694    # as given in ordering file or None in which case points
5695    # are assigned the trivial indices enumerating them from
5696    # 0 to number_of_points-1
5697    if permutation is None:
5698        permutation = num.arange(number_of_points, dtype=num.int)
5699
5700    # NetCDF file definition
5701    outfile = NetCDFFile(stsname, netcdf_mode_w)
5702
5703    description = 'Converted from URS mux2 files: %s' % basename_in
5704
5705    # Create new file
5706    sts = Write_sts()
5707    sts.store_header(outfile,
5708                     times+starttime,
5709                     number_of_points,
5710                     description=description,
5711                     verbose=verbose,
5712                     sts_precision=netcdf_float)
5713
5714    # Store
5715    from anuga.coordinate_transforms.redfearn import redfearn
5716
5717    x = num.zeros(number_of_points, num.float)  # Easting
5718    y = num.zeros(number_of_points, num.float)  # Northing
5719
5720    # Check zone boundaries
5721    if zone is None:
5722        refzone, _, _ = redfearn(latitudes[0], longitudes[0],
5723                                 central_meridian=central_meridian)
5724    else:
5725        refzone = zone
5726
5727    old_zone = refzone
5728
5729    for i in range(number_of_points):
5730        computed_zone, easting, northing = redfearn(latitudes[i], longitudes[i],
5731                                                    zone=zone,
5732                                                    central_meridian=central_meridian)
5733        x[i] = easting
5734        y[i] = northing
5735        if computed_zone != refzone:
5736            msg = 'All sts gauges need to be in the same zone. \n'
            msg += 'offending gauge: Zone %d, %.4f, %.4f\n' \
                   % (computed_zone, easting, northing)
            msg += 'previous gauge: Zone %d, %.4f, %.4f' \
                   % (old_zone, old_easting, old_northing)
5741            raise Exception, msg
5742        old_zone = computed_zone
5743        old_easting = easting
5744        old_northing = northing
5745
5746    if origin is None:
5747        origin = Geo_reference(refzone, min(x), min(y))
5748    geo_ref = write_NetCDF_georeference(origin, outfile)
5749
5750    elevation = num.resize(elevation, outfile.variables['elevation'][:].shape)
5751    outfile.variables['permutation'][:] = permutation.astype(num.int32) # Opteron 64
5752    outfile.variables['x'][:] = x - geo_ref.get_xllcorner()
5753    outfile.variables['y'][:] = y - geo_ref.get_yllcorner()
5754    outfile.variables['elevation'][:] = elevation
5755
5756    stage = outfile.variables['stage']
5757    xmomentum = outfile.variables['xmomentum']
5758    ymomentum = outfile.variables['ymomentum']
5759
5760    if verbose: print 'Converting quantities'
5761
5762    for j in range(len(times)):
5763        for i in range(number_of_points):
5764            ha = mux['HA'][i,j]
5765            ua = mux['UA'][i,j]
5766            va = mux['VA'][i,j]
5767            if ha == NODATA:
5768                if verbose:
5769                    msg = 'Setting nodata value %d to 0 at time = %f, ' \
5770                          'point = %d' % (ha, times[j], i)
5771                    print msg
5772                ha = 0.0
5773                ua = 0.0
5774                va = 0.0
5775
5776            w = zscale*ha + mean_stage
5777            h = w - elevation[i]
5778            stage[j,i] = w
5779
5780            xmomentum[j,i] = ua * h
5781            ymomentum[j,i] = -va * h # South is positive in mux files
5782
5783
5784    outfile.close()
5785
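# A usage sketch for urs2sts (file stems, weights and gauge coordinates are
# hypothetical).  An ordering file might look like:
#
#     index, longitude, latitude
#     0, 150.00, -34.00
#     2, 150.05, -34.05
#
# and a corresponding call combining two equally weighted sources:
#
#     urs2sts(['sourceA', 'sourceB'], basename_out='boundary',
#             weights=[0.5, 0.5],
#             ordering_filename='ordering.txt',
#             mean_stage=0.0, verbose=True)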
5786
5787##
5788# @brief Create a list of points defining a boundary from an STS file.
5789# @param stsname Stem of path to the STS file to read.
5790# @return A list of boundary points.
5791def create_sts_boundary(stsname):
5792    """Create a list of points defining a boundary from an STS file.
5793
    Create boundary segments from an .sts file. Points can be stored in
    arbitrary order within the .sts file. The order in which the .sts points
    make up the boundary is given in the order.txt file.
5797
5798    FIXME:
5799    Point coordinates are stored in relative eastings and northings.
5800    But boundary is produced in absolute coordinates
5801    """
5802
5803    try:
5804        fid = NetCDFFile(stsname + '.sts', netcdf_mode_r)
5805    except:
        msg = 'Cannot open %s' % (stsname + '.sts')
        raise Exception, msg
5808
5809    xllcorner = fid.xllcorner[0]
5810    yllcorner = fid.yllcorner[0]
5811
    # Points stored in the sts file are normalised to [xllcorner, yllcorner],
    # but we cannot assume that the boundary polygon will be - at least not
    # the additional points specified by the user after this function is called.
5815    x = fid.variables['x'][:] + xllcorner
5816    y = fid.variables['y'][:] + yllcorner
5817
5818    x = num.reshape(x, (len(x),1))
5819    y = num.reshape(y, (len(y),1))
5820    sts_points = num.concatenate((x,y), axis=1)
5821
5822    return sts_points.tolist()
5823
5824
5825##
5826# @brief A class to write an SWW file.
5827class Write_sww:
5828    from anuga.shallow_water.shallow_water_domain import Domain
5829
5830    # FIXME (Ole): Hardwiring the conserved quantities like
5831    # this could be a problem. I would prefer taking them from
5832    # the instantiation of Domain.
5833    #
5834    # (DSG) There is not always a Domain instance when Write_sww is used.
5835    # Check to see if this is the same level of hardwiring as is in
    # shallow water domain.
5837
5838    sww_quantities = Domain.conserved_quantities
5839
5840    RANGE = '_range'
5841    EXTREMA = ':extrema'
5842
5843    ##
    # @brief Instantiate the SWW writer class.
5845    def __init__(self):
5846        pass
5847
5848    ##
5849    # @brief Store a header in the SWW file.
5850    # @param outfile Open handle to the file that will be written.
5851    # @param times A list of time slices *or* a start time.
5852    # @param number_of_volumes The number of triangles.
5853    # @param number_of_points The number of points.
5854    # @param description The internal file description string.
5855    # @param smoothing True if smoothing is to be used.
5856    # @param order
5857    # @param sww_precision Data type of the quantity written (netcdf constant)
5858    # @param verbose True if this function is to be verbose.
5859    # @note If 'times' is a list, the info will be made relative.
5860    def store_header(self,
5861                     outfile,
5862                     times,
5863                     number_of_volumes,
5864                     number_of_points,
5865                     description='Converted from XXX',
5866                     smoothing=True,
5867                     order=1,
5868                     sww_precision=netcdf_float32,
5869                     verbose=False):
5870        """Write an SWW file header.
5871
5872        outfile - the open file that will be written
5873        times - A list of the time slice times OR a start time
5874        Note, if a list is given the info will be made relative.
5875        number_of_volumes - the number of triangles
5876        """
5877
5878        outfile.institution = 'Geoscience Australia'
5879        outfile.description = description
5880
5881        # For sww compatibility
5882        if smoothing is True:
            # Smoothing to be deprecated
5884            outfile.smoothing = 'Yes'
5885            outfile.vertices_are_stored_uniquely = 'False'
5886        else:
            # Smoothing to be deprecated
5888            outfile.smoothing = 'No'
5889            outfile.vertices_are_stored_uniquely = 'True'
5890        outfile.order = order
5891
5892        try:
5893            revision_number = get_revision_number()
5894        except:
5895            revision_number = None
5896        # Allow None to be stored as a string
5897        outfile.revision_number = str(revision_number)
5898
        # This is used to separate a single number from a list.
        # What it actually does is distinguish lists and numeric arrays
        # from a scalar.
5901        if isinstance(times, (list, num.ndarray)):
5902            number_of_times = len(times)
5903            times = ensure_numeric(times)
5904            if number_of_times == 0:
5905                starttime = 0
5906            else:
5907                starttime = times[0]
5908                times = times - starttime  #Store relative times
5909        else:
5910            number_of_times = 0
5911            starttime = times
5912            #times = ensure_numeric([])
5913
5914        outfile.starttime = starttime
5915
5916        # dimension definitions
5917        outfile.createDimension('number_of_volumes', number_of_volumes)
5918        outfile.createDimension('number_of_vertices', 3)
5919        outfile.createDimension('numbers_in_range', 2)
5920
5921        if smoothing is True:
5922            outfile.createDimension('number_of_points', number_of_points)
5923            # FIXME(Ole): This will cause sww files for parallel domains to
5924            # have ghost nodes stored (but not used by triangles).
5925            # To clean this up, we have to change get_vertex_values and
5926            # friends in quantity.py (but I can't be bothered right now)
5927        else:
5928            outfile.createDimension('number_of_points', 3*number_of_volumes)
5929
5930        outfile.createDimension('number_of_timesteps', number_of_times)
5931
5932        # variable definitions
5933        outfile.createVariable('x', sww_precision, ('number_of_points',))
5934        outfile.createVariable('y', sww_precision, ('number_of_points',))
5935        outfile.createVariable('elevation', sww_precision,
5936                               ('number_of_points',))
5937        q = 'elevation'
5938        outfile.createVariable(q + Write_sww.RANGE, sww_precision,
5939                               ('numbers_in_range',))
5940
5941        # Initialise ranges with small and large sentinels.
5942        # If this was in pure Python we could have used None sensibly
5943        outfile.variables[q+Write_sww.RANGE][0] = max_float  # Min
5944        outfile.variables[q+Write_sww.RANGE][1] = -max_float # Max
5945
5946        # FIXME: Backwards compatibility
5947        outfile.createVariable('z', sww_precision, ('number_of_points',))
5948
5949        outfile.createVariable('volumes', netcdf_int, ('number_of_volumes',
5950                                                       'number_of_vertices'))
5951
5952        # Doing sww_precision instead of Float gives cast errors.
5953        outfile.createVariable('time', netcdf_float,
5954                               ('number_of_timesteps',))
5955
5956        for q in Write_sww.sww_quantities:
5957            outfile.createVariable(q, sww_precision, ('number_of_timesteps',
5958                                                      'number_of_points'))
5959            outfile.createVariable(q + Write_sww.RANGE, sww_precision,
5960                                   ('numbers_in_range',))
5961
5962            # Initialise ranges with small and large sentinels.
5963            # If this was in pure Python we could have used None sensibly
5964            outfile.variables[q+Write_sww.RANGE][0] = max_float  # Min
5965            outfile.variables[q+Write_sww.RANGE][1] = -max_float # Max
5966
5967        if isinstance(times, (list, num.ndarray)):
5968            outfile.variables['time'][:] = times    #Store time relative
5969
5970        if verbose:
5971            print '------------------------------------------------'
5972            print 'Statistics:'
5973            print '    t in [%f, %f], len(t) == %d' \
5974                  % (num.min(times), num.max(times), len(times.flat))
5975
5976    ##
5977    # @brief Store triangulation data in the underlying file.
5978    # @param outfile Open handle to underlying file.
5979    # @param points_utm List or array of points in UTM.
5980    # @param volumes
5981    # @param elevation
5982    # @param zone
5983    # @param new_origin georeference that the points can be set to.
5984    # @param points_georeference The georeference of the points_utm.
5985    # @param verbose True if this function is to be verbose.
5986    def store_triangulation(self,
5987                            outfile,
5988                            points_utm,
5989                            volumes,
5990                            elevation, zone=None, new_origin=None,
5991                            points_georeference=None, verbose=False):
5992        """
        new_origin - a georeference that the points can be set to. (Maybe
5994        do this before calling this function.)
5995
5996        points_utm - currently a list or array of the points in UTM.
5997        points_georeference - the georeference of the points_utm
5998
5999        How about passing new_origin and current_origin.
        If you get both, do a conversion from the old to the new.
6001
6002        If you only get new_origin, the points are absolute,
6003        convert to relative
6004
6005        if you only get the current_origin the points are relative, store
6006        as relative.
6007
6008        if you get no georefs create a new georef based on the minimums of
6009        points_utm.  (Another option would be to default to absolute)
6010
6011        Yes, and this is done in another part of the code.
6012        Probably geospatial.
6013
6014        If you don't supply either geo_refs, then supply a zone. If not
6015        the default zone will be used.
6016
6017        precon:
6018            header has been called.
6019        """
6020
6021        number_of_points = len(points_utm)
6022        volumes = num.array(volumes)
6023        points_utm = num.array(points_utm)
6024
6025        # given the two geo_refs and the points, do the stuff
6026        # described in the method header
6027        # if this is needed else where, pull out as a function
6028        points_georeference = ensure_geo_reference(points_georeference)
6029        new_origin = ensure_geo_reference(new_origin)
6030        if new_origin is None and points_georeference is not None:
6031            points = points_utm
6032            geo_ref = points_georeference
6033        else:
6034            if new_origin is None:
6035                new_origin = Geo_reference(zone, min(points_utm[:,0]),
6036                                                 min(points_utm[:,1]))
6037            points = new_origin.change_points_geo_ref(points_utm,
6038                                                      points_georeference)
6039            geo_ref = new_origin
6040
6041        # At this stage I need a georef and points
6042        # the points are relative to the georef
6043        geo_ref.write_NetCDF(outfile)
6044
6045        # This will put the geo ref in the middle
6046        #geo_ref = Geo_reference(refzone,(max(x)+min(x))/2.0,(max(x)+min(y))/2.)
6047
        x = points[:,0]
        y = points[:,1]
6050        z = outfile.variables['z'][:]
6051
6052        if verbose:
6053            print '------------------------------------------------'
6054            print 'More Statistics:'
            print '  Extent (x/y):'
            print '    x in [%f, %f], len(x) == %d' \
                  % (min(x), max(x), len(x))
            print '    y in [%f, %f], len(y) == %d' \
                  % (min(y), max(y), len(y))
6060            print '    z in [%f, %f], len(z) == %d' \
6061                  % (min(elevation), max(elevation), len(elevation))
6062            print 'geo_ref: ',geo_ref
6063            print '------------------------------------------------'
6064
        outfile.variables['x'][:] = points[:,0] #- geo_ref.get_xllcorner()
        outfile.variables['y'][:] = points[:,1] #- geo_ref.get_yllcorner()
        outfile.variables['z'][:] = elevation
        outfile.variables['elevation'][:] = elevation  #FIXME HACK
        # Cast to int32 so 64-bit platforms (e.g. Opteron) store the
        # expected NetCDF integer type.
        outfile.variables['volumes'][:] = volumes.astype(num.int32)
6071
6072        q = 'elevation'
6073        # This updates the _range values
6074        outfile.variables[q + Write_sww.RANGE][0] = num.min(elevation)
6075        outfile.variables[q + Write_sww.RANGE][1] = num.max(elevation)
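
    # A hedged usage sketch of the georeference logic above (the writer,
    # file handle and coordinates are invented for illustration; the method
    # name is assumed from the 'triangulation' precondition mentioned in
    # store_quantities below):
    #
    #   points = [[308500.0, 6189000.0], [308510.0, 6189010.0]]  # UTM
    #
    #   - points_georeference only: points are stored as given, relative
    #     to that georeference.
    #   - new_origin only: points are treated as absolute and shifted to
    #     be relative to new_origin.
    #   - neither: an origin is derived from min(x), min(y) and 'zone'.
    #
    #   writer.store_triangulation(fid, points, volumes, elevation,
    #                              new_origin=Geo_reference(56, 308000.0,
    #                                                       6188000.0))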
6076
6077
6078    ##
6079    # @brief Write the quantity data to the underlying file.
6080    # @param outfile Handle to open underlying file.
6081    # @param sww_precision Format of quantity data to write (default Float32).
6082    # @param slice_index
6083    # @param time
6084    # @param verbose True if this function is to be verbose.
6085    # @param **quant
6086    def store_quantities(self, outfile, sww_precision=num.float32,
6087                         slice_index=None, time=None,
6088                         verbose=False, **quant):
6089        """
6090        Write the quantity info.
6091
        **quant is extra keyword arguments passed in. These must be
          the sww quantities, currently: stage, xmomentum and ymomentum.

        If the time array has already been built, use slice_index
        to specify which index to write to.

        Otherwise, use time to extend the time dimension.

        Maybe make this general, but the viewer assumes these quantities,
        so maybe we don't want it general - unless the viewer is general.

        precon:
            triangulation and header have been called.
6106        """
6107
6108        if time is not None:
6109            file_time = outfile.variables['time']
6110            slice_index = len(file_time)
6111            file_time[slice_index] = time
6112
        # Write the conserved quantities from Domain.
        # Typically stage, xmomentum and ymomentum.
        # Other quantities will be silently ignored.
        # Also write the ranges: stage_range,
        # xmomentum_range and ymomentum_range.
6118        for q in Write_sww.sww_quantities:
6119            if not quant.has_key(q):
6120                msg = 'SWW file can not write quantity %s' % q
6121                raise NewQuantity, msg
6122            else:
6123                q_values = quant[q]
6124                outfile.variables[q][slice_index] = \
6125                                q_values.astype(sww_precision)
6126       
6127                # This updates the _range values
6128                q_range = outfile.variables[q + Write_sww.RANGE][:]
6129                q_values_min = num.min(q_values)
6130                if q_values_min < q_range[0]:
6131                    outfile.variables[q + Write_sww.RANGE][0] = q_values_min
6132                q_values_max = num.max(q_values)
6133                if q_values_max > q_range[1]:
6134                    outfile.variables[q + Write_sww.RANGE][1] = q_values_max
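
    # A hedged usage sketch (handle and values invented): passing 'time'
    # appends a new slice by growing the time dimension, while passing
    # 'slice_index' writes into an existing slice instead:
    #
    #   w.store_quantities(fid, time=30.0,
    #                      stage=num.array([0.1, 0.2, 0.3]),
    #                      xmomentum=num.array([0.0, 0.0, 0.1]),
    #                      ymomentum=num.array([0.0, 0.1, 0.0]))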
6135
6136    ##
6137    # @brief Print the quantities in the underlying file.
    # @param outfile Handle to open underlying file.
6139    def verbose_quantities(self, outfile):
6140        print '------------------------------------------------'
6141        print 'More Statistics:'
6142        for q in Write_sww.sww_quantities:
            print '%s in [%f, %f]' % (q,
6144                                        outfile.variables[q+Write_sww.RANGE][0],
6145                                        outfile.variables[q+Write_sww.RANGE][1])
6146        print '------------------------------------------------'
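
    # Example of the output produced (range values invented):
    #
    #   ------------------------------------------------
    #   More Statistics:
    #   stage in [-0.250000, 1.100000]
    #   xmomentum in [-0.500000, 0.500000]
    #   ymomentum in [-0.400000, 0.600000]
    #   ------------------------------------------------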
6147
6148
6149##
6150# @brief Obsolete?
# @param outfile Handle to open file to write the time slices to.
# @param has List of wave height arrays, one per timestep.
# @param uas List of east velocity arrays, one per timestep.
# @param vas List of north velocity arrays, one per timestep.
# @param elevation Elevation values at the points.
# @param mean_stage Mean stage added to the scaled wave heights.
# @param zscale Scale factor applied to the wave heights.
# @param verbose True if this function is to be verbose.
6159def obsolete_write_sww_time_slices(outfile, has, uas, vas, elevation,
6160                                   mean_stage=0, zscale=1,
6161                                   verbose=False):
6162    #Time stepping
6163    stage = outfile.variables['stage']
6164    xmomentum = outfile.variables['xmomentum']
6165    ymomentum = outfile.variables['ymomentum']
6166
6167    n = len(has)
6168    j = 0
6169    for ha, ua, va in map(None, has, uas, vas):
6170        if verbose and j % ((n+10)/10) == 0: print '  Doing %d of %d' % (j, n)
6171        w = zscale*ha + mean_stage
6172        stage[j] = w
6173        h = w - elevation
6174        xmomentum[j] = ua * h
6175        ymomentum[j] = -1 * va * h  # -1 since in mux files south is positive.
6176        j += 1
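
# Worked example of the update above (numbers invented): with zscale=1,
# mean_stage=0.5, ha=0.2 and elevation=-10.0 we get stage
# w = 1*0.2 + 0.5 = 0.7 and depth h = 0.7 - (-10.0) = 10.7, so
# xmomentum = ua*10.7 and ymomentum = -va*10.7 (sign flipped because
# south is positive in mux files).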
6177
6178
6179##
6180# @brief Convert a set of URS files to a text file.
6181# @param basename_in Stem path to the 3 URS files.
# @param location_index If given, also write the position and time series
#                       for the point with this index.
6183def urs2txt(basename_in, location_index=None):
6184    """
6185    Not finished or tested
6186    """
6187
6188    files_in = [basename_in + WAVEHEIGHT_MUX_LABEL,
6189                basename_in + EAST_VELOCITY_LABEL,
6190                basename_in + NORTH_VELOCITY_LABEL]
6191    quantities = ['HA','UA','VA']
6192
6193    d = ","
6194
6195    # instantiate urs_points of the three mux files.
6196    mux = {}
    for quantity, filename in map(None, quantities, files_in):
        mux[quantity] = Urs_points(filename)
6199
6200    # Could check that the depth is the same. (hashing)
6201
6202    # handle to a mux file to do depth stuff
6203    a_mux = mux[quantities[0]]
6204
6205    # Convert to utm
6206    latitudes = a_mux.lonlatdep[:,1]
6207    longitudes = a_mux.lonlatdep[:,0]
6208    points_utm, zone = \
6209        convert_from_latlon_to_utm(latitudes=latitudes, longitudes=longitudes)
6210    depths = a_mux.lonlatdep[:,2]
6211
6212    # open the output text file, start writing.
6213    fid = open(basename_in + '.txt', 'w')
6214
6215    fid.write("zone: " + str(zone) + "\n")
6216
6217    if location_index is not None:
6218        #Title
6219        li = location_index
6220        fid.write('location_index' + d + 'lat' + d + 'long' + d +
6221                  'Easting' + d + 'Northing' + '\n')
6222        fid.write(str(li) + d + str(latitudes[li]) + d +
6223                  str(longitudes[li]) + d + str(points_utm[li][0]) + d +
                  str(points_utm[li][1]) + '\n')
6225
6226    # the non-time dependent stuff
6227    #Title
6228    fid.write('location_index' + d + 'lat' + d + 'long' + d +
6229              'Easting' + d + 'Northing' + d + 'depth m' + '\n')
6230    i = 0
    for depth, point_utm, lat, lon in map(None, depths, points_utm,
                                          latitudes, longitudes):

        fid.write(str(i) + d + str(lat) + d + str(lon) + d +
                  str(point_utm[0]) + d + str(point_utm[1]) + d +
                  str(depth) + '\n')
6237        i += 1
6238
6239    #Time dependent
6240    if location_index is not None:
6241        time_step = a_mux.time_step
6242        i = 0
6243        #Title
        fid.write('time' + d + 'HA depth m' + d + 'UA velocity East m/sec' +
                  d + 'VA velocity North m/sec' + '\n')
6246        for HA, UA, VA in map(None, mux['HA'], mux['UA'], mux['VA']):
6247            fid.write(str(i*time_step) + d + str(HA[location_index]) + d +
6248                      str(UA[location_index]) + d +
6249                      str(VA[location_index]) + '\n')
6250            i += 1
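
# A hedged usage sketch (basename invented): given the three mux files
# named by the basename plus WAVEHEIGHT_MUX_LABEL, EAST_VELOCITY_LABEL
# and NORTH_VELOCITY_LABEL, this writes 'project.txt' with one row per
# point, plus a time series for point 0:
#
#   urs2txt('project', location_index=0)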
6251
6252
6253##
6254# @brief A class to write STS files.
6255class Write_sts:
6256    sts_quantities = ['stage','xmomentum','ymomentum']
6257    RANGE = '_range'
6258    EXTREMA = ':extrema'
6259
6260    ##
6261    # @brief Instantiate this STS writer class.
6262    def __init__(self):
6263        pass
6264
6265    ##
6266    # @brief Write a header to the underlying data file.
6267    # @param outfile Handle to open file to write.
6268    # @param times A list of the time slice times *or* a start time.
6269    # @param number_of_points The number of URS gauge sites.
6270    # @param description Description string to write into the STS file.
6271    # @param sts_precision Format of data to write (netcdf constant ONLY).
6272    # @param verbose True if this function is to be verbose.
6273    # @note If 'times' is a list, the info will be made relative.
6274    def store_header(self,
6275                     outfile,
6276                     times,
6277                     number_of_points,
6278                     description='Converted from URS mux2 format',
6279                     sts_precision=netcdf_float32,
6280                     verbose=False):
6281        """
        outfile - handle to an open NetCDF file to write to
        times - a list of the time slice times OR a single start time
        Note: if a list is given, the times will be stored relative to
        the first one.
        number_of_points - the number of URS gauge sites
6286        """
6287
6288        outfile.institution = 'Geoscience Australia'
6289        outfile.description = description
6290
6291        try:
6292            revision_number = get_revision_number()
6293        except:
6294            revision_number = None
6295
6296        # Allow None to be stored as a string
6297        outfile.revision_number = str(revision_number)
6298
        # Start time in seconds since the epoch (midnight 1/1/1970).
        # The isinstance test below distinguishes a list/array of times
        # (stored relative to the first one) from a single scalar start
        # time.
6302        if isinstance(times, (list, num.ndarray)):
6303            number_of_times = len(times)
6304            times = ensure_numeric(times)
6305            if number_of_times == 0:
6306                starttime = 0
6307            else:
6308                starttime = times[0]
6309                times = times - starttime  #Store relative times
6310        else:
6311            number_of_times = 0
6312            starttime = times
6313
6314        outfile.starttime = starttime
6315
6316        # Dimension definitions
6317        outfile.createDimension('number_of_points', number_of_points)
6318        outfile.createDimension('number_of_timesteps', number_of_times)
6319        outfile.createDimension('numbers_in_range', 2)
6320
6321        # Variable definitions
6322        outfile.createVariable('permutation', netcdf_int, ('number_of_points',))
6323        outfile.createVariable('x', sts_precision, ('number_of_points',))
6324        outfile.createVariable('y', sts_precision, ('number_of_points',))
6325        outfile.createVariable('elevation',sts_precision, ('number_of_points',))
6326
6327        q = 'elevation'
6328        outfile.createVariable(q + Write_sts.RANGE, sts_precision,
6329                               ('numbers_in_range',))
6330
6331        # Initialise ranges with small and large sentinels.
6332        # If this was in pure Python we could have used None sensibly
6333        outfile.variables[q + Write_sts.RANGE][0] = max_float  # Min
6334        outfile.variables[q + Write_sts.RANGE][1] = -max_float # Max
6335
6336        # Doing sts_precision instead of Float gives cast errors.
6337        outfile.createVariable('time', netcdf_float, ('number_of_timesteps',))
6338
6339        for q in Write_sts.sts_quantities:
6340            outfile.createVariable(q, sts_precision, ('number_of_timesteps',
6341                                                      'number_of_points'))
6342            outfile.createVariable(q + Write_sts.RANGE, sts_precision,
6343                                   ('numbers_in_range',))
6344            # Initialise ranges with small and large sentinels.
6345            # If this was in pure Python we could have used None sensibly
6346            outfile.variables[q + Write_sts.RANGE][0] = max_float  # Min
6347            outfile.variables[q + Write_sts.RANGE][1] = -max_float # Max
6348
6349        if isinstance(times, (list, num.ndarray)):
6350            outfile.variables['time'][:] = times    #Store time relative
6351
        if verbose:
            print '------------------------------------------------'
            print 'Statistics:'
            # Guard against a scalar start time: only arrays have extents.
            if isinstance(times, num.ndarray) and len(times) > 0:
                print '    t in [%f, %f], len(t) == %d' \
                      % (num.min(times), num.max(times), len(times.flat))
6357
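    # A hedged usage sketch (file name, times and point count invented):
    #
    #   from Scientific.IO.NetCDF import NetCDFFile
    #   fid = NetCDFFile('gauges.sts', netcdf_mode_w)
    #   sts_writer = Write_sts()
    #   sts_writer.store_header(fid, times=[0.0, 30.0, 60.0],
    #                           number_of_points=4)
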
6358    ##
    # @brief Write the point data to the underlying file.
    # @param outfile Handle to open underlying file.
    # @param points_utm List or array of the points in UTM.
    # @param elevation Elevation data at the points.
    # @param zone UTM zone of the points.
    # @param new_origin Georeference that the points can be set to.
    # @param points_georeference Georeference of points_utm.
6366    # @param verbose True if this function is to be verbose.
6367    def store_points(self,
6368                     outfile,
6369                     points_utm,
6370                     elevation, zone=None, new_origin=None,
6371                     points_georeference=None, verbose=False):
6372
6373        """
6374        points_utm - currently a list or array of the points in UTM.
6375        points_georeference - the georeference of the points_utm
6376
6377        How about passing new_origin and current_origin.
        If you get both, do a conversion from the old to the new.
6379
        If you only get new_origin, the points are absolute;
        convert them to relative.
6382