source: branches/numpy/anuga/shallow_water/data_manager.py @ 7009

Last change on this file since 7009 was 7009, checked in by rwilson, 16 years ago

Replaced 'sets' module with 'frozenset' builtin class.

1"""datamanager.py - input output for AnuGA
2
3
4This module takes care of reading and writing datafiles such as topograhies,
5model output, etc
6
7
8Formats used within AnuGA:
9
10.sww: Netcdf format for storing model output f(t,x,y)
11.tms: Netcdf format for storing time series f(t)
12
13.csv: ASCII format for storing arbitrary points and associated attributes
14.pts: NetCDF format for storing arbitrary points and associated attributes
15
16.asc: ASCII format of regular DEMs as output from ArcView
17.prj: Associated ArcView file giving more meta data for asc format
18.ers: ERMapper header format of regular DEMs for ArcView
19
20.dem: NetCDF representation of regular DEM data
21
22.tsh: ASCII format for storing meshes and associated boundary and region info
23.msh: NetCDF format for storing meshes and associated boundary and region info
24
25.nc: Native ferret NetCDF format
26.geo: Houdinis ascii geometry format (?)
27
28
29A typical dataflow can be described as follows
30
31Manually created files:
32ASC, PRJ:     Digital elevation models (gridded)
33TSH:          Triangular meshes (e.g. created from anuga.pmesh)
34NC            Model outputs for use as boundary conditions (e.g from MOST)
35
36
37AUTOMATICALLY CREATED FILES:
38
39ASC, PRJ  ->  DEM  ->  PTS: Conversion of DEM's to native pts file
40
41NC -> SWW: Conversion of MOST bundary files to boundary sww
42
43PTS + TSH -> TSH with elevation: Least squares fit
44
45TSH -> SWW:  Conversion of TSH to sww viewable using Swollen
46
47TSH + Boundary SWW -> SWW: Simluation using abstract_2d_finite_volumes
48
49"""

# This file was reverted from changeset:5484 to changeset:5470 on 10th July
# by Ole.

import os, sys
import csv
import exceptions
import string
import shutil
from struct import unpack
import array as p_array
from os import sep, path, remove, mkdir, access, F_OK, W_OK, getcwd

import numpy as num

from Scientific.IO.NetCDF import NetCDFFile
from os.path import exists, basename, join

from anuga.coordinate_transforms.redfearn import redfearn, \
     convert_from_latlon_to_utm
from anuga.coordinate_transforms.geo_reference import Geo_reference, \
     write_NetCDF_georeference, ensure_geo_reference
from anuga.geospatial_data.geospatial_data import Geospatial_data, \
     ensure_absolute
from anuga.config import minimum_storable_height as \
     default_minimum_storable_height
from anuga.config import netcdf_mode_r, netcdf_mode_w, netcdf_mode_a
from anuga.config import netcdf_float, netcdf_float32, netcdf_int
from anuga.config import max_float
from anuga.utilities.numerical_tools import ensure_numeric, mean
from anuga.caching.caching import myhash
from anuga.utilities.anuga_exceptions import ANUGAError
from anuga.shallow_water import Domain
from anuga.abstract_2d_finite_volumes.pmesh2domain import \
     pmesh_to_domain_instance
from anuga.abstract_2d_finite_volumes.util import get_revision_number, \
     remove_lone_verts, sww2timeseries, get_centroid_values

from anuga.abstract_2d_finite_volumes.neighbour_mesh import segment_midpoints
from anuga.load_mesh.loadASCII import export_mesh_file
from anuga.utilities.polygon import intersection, inside_polygon

from anuga.utilities.system_tools import get_vars_in_expression


# Default block size for sww2dem()
DEFAULT_BLOCK_SIZE = 10000

######
# Exception classes
######

class TitleValueError(exceptions.Exception): pass
class DataMissingValuesError(exceptions.Exception): pass
class DataFileNotOpenError(exceptions.Exception): pass
class DataTimeError(exceptions.Exception): pass
class DataDomainError(exceptions.Exception): pass
class NewQuantity(exceptions.Exception): pass


######
# formula mappings
######

quantity_formula = {'momentum':'(xmomentum**2 + ymomentum**2)**0.5',
                    'depth':'stage-elevation',
                    'speed': \
 '(xmomentum**2 + ymomentum**2)**0.5/(stage-elevation+1.e-6/(stage-elevation))'}


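# A minimal sketch (not part of the original code path) of how these formula
# strings can be evaluated against numpy arrays named after the quantities
# appearing in the expression:
#
#     >>> stage = num.array([1.0, 2.0])
#     >>> elevation = num.array([0.5, 0.5])
#     >>> eval(quantity_formula['depth'])
#     array([ 0.5,  1.5])
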
##
# @brief Convert a possible filename into a standard form.
# @param s Filename to process.
# @return The new filename string.
def make_filename(s):
    """Transform argument string into a suitable filename
    """

    s = s.strip()
    s = s.replace(' ', '_')
    s = s.replace('(', '')
    s = s.replace(')', '')
    s = s.replace('__', '_')

    return s

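# Example (hypothetical input) illustrating the transformation above:
#
#     >>> make_filename('  My output (final) ')
#     'My_output_final'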

##
# @brief Check that a specified filesystem directory path exists.
# @param path The directory path to check.
# @param verbose True if this function is to be verbose.
# @note If directory path doesn't exist, it will be created.
def check_dir(path, verbose=None):
    """Check that specified path exists.
    If path does not exist it will be created if possible

    USAGE:
       check_dir(path, verbose)

    ARGUMENTS:
        path -- Directory
        verbose -- Flag verbose output (default: None)

    RETURN VALUE:
        Verified path including trailing separator
    """

    import os.path

    if sys.platform in ['nt', 'dos', 'win32', 'what else?']:
        unix = 0
    else:
        unix = 1

    # add terminal separator, if it's not already there
    if path[-1] != os.sep:
        path = path + os.sep

    # expand ~ or ~username in path
    path = os.path.expanduser(path)

    # create directory if required
    if not (os.access(path, os.R_OK | os.W_OK) or path == ''):
        try:
            os.mkdir(path)

            # Change access rights if possible
            if unix:
                exitcode = os.system('chmod 775 ' + path)
            else:
                pass  # FIXME: What about access rights under Windows?

            if verbose: print 'MESSAGE: Directory', path, 'created.'
        except OSError:
            print 'WARNING: Directory', path, 'could not be created.'
            if unix:
                path = '/tmp/'
            else:
                path = 'C:' + os.sep

            print "Using directory '%s' instead" % path

    return path

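# A small usage sketch (hypothetical path); the directory is created if
# necessary and the returned path always ends with os.sep:
#
#     >>> datadir = check_dir('~/anuga_output')
#     >>> datadir.endswith(os.sep)
#     True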

##
# @brief Delete directory and all sub-directories.
# @param path Path to the directory to delete.
def del_dir(path):
    """Recursively delete directory path and all its contents
    """

    if os.path.isdir(path):
        for file in os.listdir(path):
            X = os.path.join(path, file)

            if os.path.isdir(X) and not os.path.islink(X):
                del_dir(X)
            else:
                try:
                    os.remove(X)
                except OSError:
                    print "Could not remove file %s" % X

        os.rmdir(path)


##
# @brief Remove a file or directory at 'path' using removal function 'func'.
# @param path Path of the object to remove.
# @param func Function used to do the removal (e.g. os.remove).
# @param verbose True if this function is to be verbose.
# @note ANOTHER OPTION, IF NEEDED IN THE FUTURE, Nick B 7/2007
def rmgeneric(path, func, verbose=False):
    ERROR_STR = """Error removing %(path)s, %(error)s """

    try:
        func(path)
        if verbose: print 'Removed ', path
    except OSError, (errno, strerror):
        print ERROR_STR % {'path' : path, 'error': strerror}


##
# @brief Remove directory and all sub-directories.
# @param path Filesystem path to directory to remove.
# @param verbose True if this function is to be verbose.
def removeall(path, verbose=False):
    if not os.path.isdir(path):
        return

    for x in os.listdir(path):
        fullpath = os.path.join(path, x)
        if os.path.isfile(fullpath):
            f = os.remove
            rmgeneric(fullpath, f, verbose)
        elif os.path.isdir(fullpath):
            removeall(fullpath, verbose)
            f = os.rmdir
            rmgeneric(fullpath, f, verbose)


##
# @brief Create a standard filename.
# @param datadir Directory where file is to be created.
# @param filename Filename 'stem'.
# @param format Format of the file, becomes filename extension.
# @param size Size of file, becomes part of filename.
# @param time Time (float), becomes part of filename.
# @return The complete filename path, including directory.
# @note The containing directory is created, if necessary.
def create_filename(datadir, filename, format, size=None, time=None):
    FN = check_dir(datadir) + filename

    if size is not None:
        FN += '_size%d' % size

    if time is not None:
        FN += '_time%.2f' % time

    FN += '.' + format

    return FN

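# For example (hypothetical arguments), this returns a path like
# './swashes_size100_time2.50.sww' and ensures that '.' exists:
#
#     >>> create_filename('.', 'swashes', 'sww', size=100, time=2.5)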

##
# @brief Get all files with a standard name and a given set of attributes.
# @param datadir Directory files must be in.
# @param filename Filename stem.
# @param format Filename extension.
# @param size Filename size.
# @return A list of filenames (including directory) that match the attributes.
def get_files(datadir, filename, format, size):
    """Get all file (names) with given name, size and format
    """

    import glob

    dir = check_dir(datadir)
    pattern = dir + filename + '_size%d*.%s' % (size, format)

    return glob.glob(pattern)


##
# @brief Generic class for storing output to e.g. visualisation or checkpointing
class Data_format:
    """Generic interface to data formats
    """

    ##
    # @brief Instantiate this instance.
    # @param domain
    # @param extension
    # @param mode The mode of the underlying file.
    def __init__(self, domain, extension, mode=netcdf_mode_w):
        assert mode[0] in ['r', 'w', 'a'], \
               "Mode %s must be either:\n" % mode + \
               "   'w' (write)\n" + \
               "   'r' (read)\n" + \
               "   'a' (append)"

        # Create filename
        self.filename = create_filename(domain.get_datadir(),
                                        domain.get_name(), extension)

        self.timestep = 0
        self.domain = domain

        # Exclude ghosts in case this is a parallel domain
        self.number_of_nodes = domain.number_of_full_nodes
        self.number_of_volumes = domain.number_of_full_triangles
        #self.number_of_volumes = len(domain)

        # FIXME: Should we have a general set_precision function?

##
# @brief Class for storing output to e.g. visualisation
class Data_format_sww(Data_format):
    """Interface to native NetCDF format (.sww) for storing model output

    There are two kinds of data

    1: Constant data: Vertex coordinates and field values. Stored once
    2: Variable data: Conserved quantities. Stored once per timestep.

    All data is assumed to reside at vertex locations.
    """

    ##
    # @brief Instantiate this instance.
    # @param domain ??
    # @param mode Mode of the underlying data file.
    # @param max_size ??
    # @param recursion ??
    # @note Prepare the underlying data file if mode starts with 'w'.
    def __init__(self, domain, mode=netcdf_mode_w, max_size=2000000000,
                 recursion=False):
        from Scientific.IO.NetCDF import NetCDFFile

        self.precision = netcdf_float32 # Use single precision for quantities
        self.recursion = recursion
        self.mode = mode
        if hasattr(domain, 'max_size'):
            self.max_size = domain.max_size # File size max is 2Gig
        else:
            self.max_size = max_size
        if hasattr(domain, 'minimum_storable_height'):
            self.minimum_storable_height = domain.minimum_storable_height
        else:
            self.minimum_storable_height = default_minimum_storable_height

        # call owning constructor
        Data_format.__init__(self, domain, 'sww', mode)

        # NetCDF file definition
        fid = NetCDFFile(self.filename, mode)
        if mode[0] == 'w':
            description = 'Output from anuga.abstract_2d_finite_volumes ' \
                          'suitable for plotting'
            self.writer = Write_sww()
            self.writer.store_header(fid,
                                     domain.starttime,
                                     self.number_of_volumes,
                                     self.domain.number_of_full_nodes,
                                     description=description,
                                     smoothing=domain.smooth,
                                     order=domain.default_order,
                                     sww_precision=self.precision)

            # Extra optional information
            if hasattr(domain, 'texture'):
                fid.texture = domain.texture

            if domain.quantities_to_be_monitored is not None:
                fid.createDimension('singleton', 1)
                fid.createDimension('two', 2)

                poly = domain.monitor_polygon
                if poly is not None:
                    N = len(poly)
                    fid.createDimension('polygon_length', N)
                    fid.createVariable('extrema.polygon',
                                       self.precision,
                                       ('polygon_length', 'two'))
                    fid.variables['extrema.polygon'][:] = poly

                interval = domain.monitor_time_interval
                if interval is not None:
                    fid.createVariable('extrema.time_interval',
                                       self.precision,
                                       ('two',))
                    fid.variables['extrema.time_interval'][:] = interval

                for q in domain.quantities_to_be_monitored:
                    fid.createVariable(q + '.extrema', self.precision,
                                       ('numbers_in_range',))
                    fid.createVariable(q + '.min_location', self.precision,
                                       ('numbers_in_range',))
                    fid.createVariable(q + '.max_location', self.precision,
                                       ('numbers_in_range',))
                    fid.createVariable(q + '.min_time', self.precision,
                                       ('singleton',))
                    fid.createVariable(q + '.max_time', self.precision,
                                       ('singleton',))

        fid.close()

    ##
    # @brief Store connectivity data into the underlying data file.
    def store_connectivity(self):
        """Specialisation of store_connectivity for NetCDF format

        Writes x,y,z coordinates of triangles constituting
        the bed elevation.
        """

        from Scientific.IO.NetCDF import NetCDFFile

        domain = self.domain

        # append to the NetCDF file
        fid = NetCDFFile(self.filename, netcdf_mode_a)

#        # Get the variables
#        x = fid.variables['x']
#        y = fid.variables['y']
#        z = fid.variables['elevation']
#        volumes = fid.variables['volumes']

        # Get X, Y and bed elevation Z
        Q = domain.quantities['elevation']
        X,Y,Z,V = Q.get_vertex_values(xy=True, precision=self.precision)

        # store the connectivity data
        points = num.concatenate( (X[:,num.newaxis],Y[:,num.newaxis]), axis=1 )
        self.writer.store_triangulation(fid,
                                        points,
                                        V.astype(num.float32),
                                        Z,
                                        points_georeference=\
                                            domain.geo_reference)

        fid.close()

    ##
    # @brief Store time and named quantities to the underlying data file.
    # @param names The names of the quantities to store.
    # @note If 'names' not supplied, store a standard set of quantities.
    def store_timestep(self, names=None):
        """Store time and named quantities to file
        """

        from Scientific.IO.NetCDF import NetCDFFile
        import types
        from time import sleep
        from os import stat

        if names is None:
            # Standard shallow water wave equation quantities in ANUGA
            names = ['stage', 'xmomentum', 'ymomentum']

        # Get NetCDF
        retries = 0
        file_open = False
        while not file_open and retries < 10:
            try:
                fid = NetCDFFile(self.filename, netcdf_mode_a) # Open existing file
            except IOError:
                # This could happen if someone was reading the file.
                # In that case, wait a while and try again
                msg = 'Warning (store_timestep): File %s could not be opened' \
                      % self.filename
                msg += ' - trying step %s again' % self.domain.time
                print msg
                retries += 1
                sleep(1)
            else:
                file_open = True

        if not file_open:
            msg = 'File %s could not be opened for append' % self.filename
            raise DataFileNotOpenError, msg

        # Check to see if the file is already too big:
        time = fid.variables['time']
        i = len(time) + 1
        file_size = stat(self.filename)[6]
        file_size_increase = file_size / i
        if file_size + file_size_increase > self.max_size * 2**self.recursion:
            # In order to get the file name and start time correct,
            # I change the domain.filename and domain.starttime.
            # This is the only way to do this without changing
            # other modules (I think).

            # Write a filename addon that won't break Swollen's reader
            # (10.sww is bad)
            filename_ext = '_time_%s' % self.domain.time
            filename_ext = filename_ext.replace('.', '_')

            # Remember the old filename, then give domain a
            # name with the extension
            old_domain_filename = self.domain.get_name()
            if not self.recursion:
                self.domain.set_name(old_domain_filename + filename_ext)

            # Change the domain starttime to the current time
            old_domain_starttime = self.domain.starttime
            self.domain.starttime = self.domain.time

            # Build a new data_structure.
            next_data_structure = Data_format_sww(self.domain, mode=self.mode,
                                                  max_size=self.max_size,
                                                  recursion=self.recursion+1)
            if not self.recursion:
                print '    file_size = %s' % file_size
                print '    saving file to %s' % next_data_structure.filename

            # set up the new data_structure
            self.domain.writer = next_data_structure

            # FIXME - could be cleaner to use domain.store_timestep etc.
            next_data_structure.store_connectivity()
            next_data_structure.store_timestep(names)
            fid.sync()
            fid.close()

            # restore the old starttime and filename
            self.domain.starttime = old_domain_starttime
            self.domain.set_name(old_domain_filename)
        else:
            self.recursion = False
            domain = self.domain

            # Get the variables
            time = fid.variables['time']
            stage = fid.variables['stage']
            xmomentum = fid.variables['xmomentum']
            ymomentum = fid.variables['ymomentum']
            i = len(time)
            if type(names) not in [types.ListType, types.TupleType]:
                names = [names]

            if 'stage' in names \
               and 'xmomentum' in names \
               and 'ymomentum' in names:
                # Get stage, elevation, depth and select only those
                # values where minimum_storable_height is exceeded
                Q = domain.quantities['stage']
                A, _ = Q.get_vertex_values(xy=False, precision=self.precision)
                z = fid.variables['elevation']

                storable_indices = (A-z[:] >= self.minimum_storable_height)
                stage = num.choose(storable_indices, (z[:], A))

                # Define a zero vector of same size and type as A
                # for use with momenta
                null = num.zeros(num.size(A), A.dtype.char) #??#

                # Get xmomentum where depth exceeds minimum_storable_height
                Q = domain.quantities['xmomentum']
                xmom, _ = Q.get_vertex_values(xy=False,
                                              precision=self.precision)
                xmomentum = num.choose(storable_indices, (null, xmom))

                # Get ymomentum where depth exceeds minimum_storable_height
                Q = domain.quantities['ymomentum']
                ymom, _ = Q.get_vertex_values(xy=False,
                                              precision=self.precision)
                ymomentum = num.choose(storable_indices, (null, ymom))

                # Write quantities to underlying data file
                self.writer.store_quantities(fid,
                                             time=self.domain.time,
                                             sww_precision=self.precision,
                                             stage=stage,
                                             xmomentum=xmomentum,
                                             ymomentum=ymomentum)
            else:
                msg = 'Quantities stored must be: stage, xmomentum, ymomentum, '
                msg += 'but I got: ' + str(names)
                raise Exception, msg

            # Update extrema if requested
            domain = self.domain
            if domain.quantities_to_be_monitored is not None:
                for q, info in domain.quantities_to_be_monitored.items():
                    if info['min'] is not None:
                        fid.variables[q + '.extrema'][0] = info['min']
                        fid.variables[q + '.min_location'][:] = \
                                        info['min_location']
                        fid.variables[q + '.min_time'][0] = info['min_time']

                    if info['max'] is not None:
                        fid.variables[q + '.extrema'][1] = info['max']
                        fid.variables[q + '.max_location'][:] = \
                                        info['max_location']
                        fid.variables[q + '.max_time'][0] = info['max_time']

            # Flush and close
            fid.sync()
            fid.close()


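# A sketch of the typical life cycle (assuming an existing shallow water
# Domain instance called 'domain'); this mirrors what get_dataobject()
# further below returns when domain.format == 'sww':
#
#     sww = Data_format_sww(domain)    # writes the sww header
#     sww.store_connectivity()         # writes mesh and bed elevation once
#     for t in domain.evolve(yieldstep=1, finaltime=10):
#         sww.store_timestep()         # appends stage and momenta each step
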
##
# @brief Class for handling checkpoint data
class Data_format_cpt(Data_format):
    """Interface to native NetCDF format (.cpt)
    """

    ##
    # @brief Initialize this instantiation.
    # @param domain ??
    # @param mode Mode of underlying data file (default WRITE).
    def __init__(self, domain, mode=netcdf_mode_w):
        from Scientific.IO.NetCDF import NetCDFFile

        self.precision = netcdf_float # Use full precision

        Data_format.__init__(self, domain, 'cpt', mode)

        # NetCDF file definition
        fid = NetCDFFile(self.filename, mode)
        if mode[0] == 'w':
            # Create new file
            fid.institution = 'Geoscience Australia'
            fid.description = 'Checkpoint data'
            #fid.smooth = domain.smooth
            fid.order = domain.default_order

            # dimension definitions
            fid.createDimension('number_of_volumes', self.number_of_volumes)
            fid.createDimension('number_of_vertices', 3)

            # Store info at all vertices (no smoothing)
            fid.createDimension('number_of_points', 3*self.number_of_volumes)
            fid.createDimension('number_of_timesteps', None) # extensible

            # variable definitions

            # Mesh
            fid.createVariable('x', self.precision, ('number_of_points',))
            fid.createVariable('y', self.precision, ('number_of_points',))

            fid.createVariable('volumes', netcdf_int, ('number_of_volumes',
                                                       'number_of_vertices'))

            fid.createVariable('time', self.precision, ('number_of_timesteps',))

            # Allocate space for all quantities
            for name in domain.quantities.keys():
                fid.createVariable(name, self.precision,
                                   ('number_of_timesteps',
                                    'number_of_points'))

        fid.close()

    ##
    # @brief Store connectivity data to the underlying data file.
    def store_checkpoint(self):
        """Write x,y coordinates and connectivity of the triangles
        constituting the mesh.
        """

        from Scientific.IO.NetCDF import NetCDFFile

        domain = self.domain

        # Get NetCDF
        fid = NetCDFFile(self.filename, netcdf_mode_a)

        # Get the variables
        x = fid.variables['x']
        y = fid.variables['y']

        volumes = fid.variables['volumes']

        # Get X, Y and bed elevation Z
        Q = domain.quantities['elevation']
        X,Y,Z,V = Q.get_vertex_values(xy=True, precision=self.precision)

        x[:] = X.astype(self.precision)
        y[:] = Y.astype(self.precision)
        # (There is no separate 'z' variable in the .cpt file definition,
        # so the bed elevation Z is not written here.)

        volumes[:] = V

        fid.close()


    ##
    # @brief Store time and a named quantity to the underlying data file.
    # @param name
    def store_timestep(self, name):
        """Store time and named quantity to file
        """

        from Scientific.IO.NetCDF import NetCDFFile
        from time import sleep

        # Get NetCDF
        retries = 0
        file_open = False
        while not file_open and retries < 10:
            try:
                fid = NetCDFFile(self.filename, netcdf_mode_a)
            except IOError:
                # This could happen if someone was reading the file.
                # In that case, wait a while and try again
                msg = 'Warning (store_timestep): File %s could not be opened' \
                      ' - trying again' % self.filename
                print msg
                retries += 1
                sleep(1)
            else:
                file_open = True

        if not file_open:
            msg = 'File %s could not be opened for append' % self.filename
            raise DataFileNotOpenError, msg

        domain = self.domain

        # Get the variables
        time = fid.variables['time']
        q_var = fid.variables[name]
        i = len(time)

        # Store time
        time[i] = self.domain.time

        # Get quantity
        Q = domain.quantities[name]
        A,V = Q.get_vertex_values(xy=False, precision=self.precision)

        q_var[i,:] = A.astype(self.precision)

        # Flush and close
        fid.sync()
        fid.close()


##
# @brief Class for National Exposure Database storage (NEXIS).

LAT_TITLE = 'LATITUDE'
LONG_TITLE = 'LONGITUDE'
X_TITLE = 'x'
Y_TITLE = 'y'

class Exposure_csv:

    ##
    # @brief Instantiate this instance.
    # @param file_name Name of underlying data file.
    # @param latitude_title ??
    # @param longitude_title ??
    # @param is_x_y_locations ??
    # @param x_title ??
    # @param y_title ??
    # @param refine_polygon ??
    # @param title_check_list ??
    def __init__(self, file_name, latitude_title=LAT_TITLE,
                 longitude_title=LONG_TITLE, is_x_y_locations=None,
                 x_title=X_TITLE, y_title=Y_TITLE,
                 refine_polygon=None, title_check_list=None):
        """
        This class is for handling the exposure csv file.
        It reads the file in and converts the lats and longs to a geospatial
        data object.
        Use the methods to read and write columns.

        The format of the csv files it reads is:
           The first row is a title row.
           Commas are the delimiters.
           Each column is a 'set' of data.

        Feel free to use/expand it to read other csv files.

        It is not for adding and deleting rows.

        Can geospatial handle string attributes? It's not made for them.
        Currently it can't load and save string attributes.

        So just use geospatial to hold the x, y and georef? Bad, since
        different attributes are in different structures.  Not so bad, the
        info to write if the .csv file is saved is in attribute_dic.

        The location info is in the geospatial attribute.
        """

        self._file_name = file_name
        self._geospatial = None

        # self._attribute_dic is a dictionary.
        # The keys are the column titles.
        # The values are lists of column data.

        # self._title_index_dic is a dictionary.
        # The keys are the column titles.
        # The values are the index positions of file columns.
        self._attribute_dic, self._title_index_dic = \
            csv2dict(self._file_name, title_check_list=title_check_list)
        try:
            # Have code here that handles caps or lower
            lats = self._attribute_dic[latitude_title]
            longs = self._attribute_dic[longitude_title]
        except KeyError:
            # maybe a warning..
            # Let's see if this works..
            if is_x_y_locations != False:
                is_x_y_locations = True
        else:
            self._geospatial = Geospatial_data(latitudes=lats,
                                               longitudes=longs)

        if is_x_y_locations is True:
            if self._geospatial is not None:
                pass # FIXME: throw an error
            try:
                xs = self._attribute_dic[x_title]
                ys = self._attribute_dic[y_title]
                points = [[float(i),float(j)] for i,j in map(None,xs,ys)]
            except KeyError:
                # maybe a warning..
                msg = "Could not find location information."
                raise TitleValueError, msg
            else:
                self._geospatial = Geospatial_data(data_points=points)

        # create a list of points that are in the refining_polygon
        # described by a list of indexes representing the points

    ##
    # @brief Create a comparison method.
    # @param self This object.
    # @param other The other object.
    # @return 0 if objects are 'equal', non-zero otherwise.
    def __cmp__(self, other):
        # check that 'other' is an instance of this class
        if isinstance(self, type(other)):
            result = cmp(self._attribute_dic, other._attribute_dic)
            if result != 0:
                return result

            # The order of the columns is important. Therefore..
            result = cmp(self._title_index_dic, other._title_index_dic)
            if result != 0:
                return result
            for self_ls, other_ls in map(None, self._attribute_dic,
                                         other._attribute_dic):
                result = cmp(self._attribute_dic[self_ls],
                             other._attribute_dic[other_ls])
                if result != 0:
                    return result
            return 0
        else:
            return 1

    ##
    # @brief Get a list of column values given a column name.
    # @param column_name The name of the column to get values from.
    # @param use_refind_polygon Unused??
    def get_column(self, column_name, use_refind_polygon=False):
        """
        Given a column name return a list of the column values.

        Note, the type of the values will be String!
        Do this to change a list of strings to a list of floats:
            time = [float(x) for x in time]

        Not implemented:
        if use_refind_polygon is True, only return values in the
        refined polygon
        """

        if not self._attribute_dic.has_key(column_name):
            msg = 'There is no column called %s!' % column_name
            raise TitleValueError, msg

        return self._attribute_dic[column_name]

    ##
    # @brief ??
    # @param value_column_name ??
    # @param known_column_name ??
    # @param known_values ??
    # @param use_refind_polygon ??
    def get_value(self, value_column_name, known_column_name,
                  known_values, use_refind_polygon=False):
        """
        Do linear interpolation on the known_column, using the known_values,
        to return a value of the value_column_name.
        """

        pass

    ##
    # @brief Get a geospatial object that describes the locations.
    # @param use_refind_polygon Unused??
    def get_location(self, use_refind_polygon=False):
        """
        Return a geospatial object which describes the
        locations of the location file.

        Note, if there is no location info, this returns None.

        Not implemented:
        if use_refind_polygon is True, only return values in the
        refined polygon
        """

        return self._geospatial

    ##
    # @brief Add column to 'end' of CSV data.
    # @param column_name The new column name.
    # @param column_values The new column values.
    # @param overwrite If True, overwrites last column, doesn't add at end.
    def set_column(self, column_name, column_values, overwrite=False):
        """
        Add a column to the 'end' (with the right most column being the end)
        of the csv file.

        Set overwrite to True if you want to overwrite a column.

        Note, in column_name white space is removed and case is not checked.

        Precondition:
        The column_name and column_values cannot have commas in them.
        """

        # sanity checks
        value_row_count = \
                len(self._attribute_dic[self._title_index_dic.keys()[0]])
        if len(column_values) != value_row_count:
            msg = 'The number of column values must equal the number of rows.'
            raise DataMissingValuesError, msg

        # check new column name isn't already used, and we aren't overwriting
        if self._attribute_dic.has_key(column_name):
            if not overwrite:
                msg = 'Column name %s already in use!' % column_name
                raise TitleValueError, msg
        else:
            # New title.  Add it to the title index.
            self._title_index_dic[column_name] = len(self._title_index_dic)

        self._attribute_dic[column_name] = column_values

    ##
    # @brief Save the exposure CSV file.
    # @param file_name If supplied, use this filename, not original.
    def save(self, file_name=None):
        """
        Save the exposure csv file
        """

        if file_name is None:
            file_name = self._file_name

        fd = open(file_name, 'wb')
        writer = csv.writer(fd)

        # Write the title to a csv file
        line = [None] * len(self._title_index_dic)
        for title in self._title_index_dic.iterkeys():
            line[self._title_index_dic[title]] = title
        writer.writerow(line)

        # Write the values to a csv file
        value_row_count = \
                len(self._attribute_dic[self._title_index_dic.keys()[0]])
        for row_i in range(value_row_count):
            line = [None] * len(self._title_index_dic)
            for title in self._title_index_dic.iterkeys():
                line[self._title_index_dic[title]] = \
                     self._attribute_dic[title][row_i]
            writer.writerow(line)


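# A minimal, self-contained sketch (hypothetical file and column names):
#
#     >>> open('exposure.csv', 'w').write(
#     ...     'LATITUDE,LONGITUDE,WALLS\n-34.5,150.9,brick\n')
#     >>> exposure = Exposure_csv('exposure.csv')
#     >>> exposure.get_column('WALLS')    # values come back as strings
#     ['brick']
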
def csv2building_polygons(file_name,
                          floor_height=3,
                          clipping_polygons=None):
    """
    Convert CSV files of the form:

    easting,northing,id,floors
    422664.22,870785.46,2,0
    422672.48,870780.14,2,0
    422668.17,870772.62,2,0
    422660.35,870777.17,2,0
    422664.22,870785.46,2,0
    422661.30,871215.06,3,1
    422667.50,871215.70,3,1
    422668.30,871204.86,3,1
    422662.21,871204.33,3,1
    422661.30,871215.06,3,1

    to a dictionary of polygons with id as key.
    The associated number of floors are converted to m above MSL and
    returned as a separate dictionary also keyed by id.

    Optional parameter floor_height is the height of each building story.
    Optional parameter clipping_polygons is a list of polygons selecting
    buildings. Any building not in these polygons will be omitted.

    See csv2polygons for more details
    """

    polygons, values = csv2polygons(file_name,
                                    value_name='floors',
                                    clipping_polygons=clipping_polygons)

    heights = {}
    for key in values.keys():
        v = float(values[key])
        heights[key] = v*floor_height

    return polygons, heights

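# For instance (hypothetical data), a building whose 'floors' value is 2
# maps, with the default floor_height of 3, to a height of 6.0:
#
#     >>> polygons, heights = csv2building_polygons('buildings.csv')
#     >>> heights['7']    # assuming id '7' has floors=2
#     6.0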

##
# @brief Convert CSV file into a dictionary of polygons and associated values.
# @param file_name The path to the file to read.
# @param value_name Name of the 4th column (the value column).
# @param clipping_polygons Optional list of polygons used to clip the result.
def csv2polygons(file_name,
                 value_name='value',
                 clipping_polygons=None):
    """
    Convert CSV files of the form:

    easting,northing,id,value
    422664.22,870785.46,2,0
    422672.48,870780.14,2,0
    422668.17,870772.62,2,0
    422660.35,870777.17,2,0
    422664.22,870785.46,2,0
    422661.30,871215.06,3,1
    422667.50,871215.70,3,1
    422668.30,871204.86,3,1
    422662.21,871204.33,3,1
    422661.30,871215.06,3,1

    to a dictionary of polygons with id as key.
    The associated values are returned as a separate dictionary also keyed
    by id.

    easting: x coordinate relative to zone implied by the model
    northing: y coordinate relative to zone implied by the model
    id: tag for polygon comprising points with this tag
    value: numeral associated with each polygon. These must be the same for
           all points in each polygon.

    The last header, value, can take on other names such as roughness,
    floors, etc - or it can be omitted in which case the returned values
    will be None.

    Eastings and Northings will be returned as floating point values while
    id and values will be returned as strings.

    Optional argument: clipping_polygons will select only those polygons
    that are fully within one or more of the clipping_polygons. In other
    words any polygon from the csv file which has at least one point not
    inside one of the clipping polygons will be excluded.

    See underlying function csv2dict for more details.
    """

    X, _ = csv2dict(file_name)

    msg = 'Polygon csv file must have 3 or 4 columns'
    assert len(X.keys()) in [3, 4], msg

    msg = 'Did not find expected column header: easting'
    assert 'easting' in X.keys(), msg

    msg = 'Did not find expected column header: northing'
    assert 'northing' in X.keys(), msg

    msg = 'Did not find expected column header: id'
    assert 'id' in X.keys(), msg

    if value_name is not None:
        msg = 'Did not find expected column header: %s' % value_name
        assert value_name in X.keys(), msg

    polygons = {}
    if len(X.keys()) == 4:
        values = {}
    else:
        values = None

    # Loop through entries and compose polygons
    excluded_polygons = {}
    past_ids = {}
    last_id = None
    for i, id in enumerate(X['id']):

        # Check for duplicate polygons
        if id in past_ids:
            msg = 'Polygon %s was duplicated in line %d' % (id, i)
            raise Exception, msg

        if id not in polygons:
            # Start new polygon
            polygons[id] = []
            if values is not None:
                values[id] = X[value_name][i]

            # Keep track of previous polygon ids
            if last_id is not None:
                past_ids[last_id] = i

        # Append this point to current polygon
        point = [float(X['easting'][i]), float(X['northing'][i])]

        if clipping_polygons is not None:
            exclude = True
            for clipping_polygon in clipping_polygons:
                if inside_polygon(point, clipping_polygon):
                    exclude = False
                    break

            if exclude is True:
                excluded_polygons[id] = True

        polygons[id].append(point)

        # Check that value is the same across each polygon
        if values is not None:
            msg = 'Values must be the same across each polygon. '
            msg += 'I got %s in line %d but it should have been %s' \
                   % (X[value_name][i], i, values[id])
            assert values[id] == X[value_name][i], msg

        last_id = id

    # Weed out polygons that were not wholly inside clipping polygons
    for id in excluded_polygons:
        del polygons[id]

    return polygons, values


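# A short usage sketch, assuming the example rows from the docstring above
# are saved in a file called 'polygons.csv':
#
#     >>> polygons, values = csv2polygons('polygons.csv')
#     >>> len(polygons['2'])    # five points make up polygon '2'
#     5
#     >>> values['3']           # values come back as strings
#     '1'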


##
# @brief Convert CSV file to a dictionary of arrays.
# @param file_name The path to the file to read.
def csv2array(file_name):
    """
    Convert CSV files of the form:

    time, discharge, velocity
    0.0,  1.2,       0.0
    0.1,  3.2,       1.1
    ...

    to a dictionary of numeric arrays.


    See underlying function csv2dict for more details.
    """

    X, _ = csv2dict(file_name)

    Y = {}
    for key in X.keys():
        Y[key] = num.array([float(x) for x in X[key]])

    return Y

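# For example (hypothetical file 'hydrograph.csv' holding the rows shown in
# the docstring), the columns come back as numeric arrays keyed by the
# stripped column titles:
#
#     >>> csv2array('hydrograph.csv')['discharge']
#     array([ 1.2,  3.2])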

##
# @brief Read a CSV file and convert to a dictionary of {key: column}.
# @param file_name The path to the file to read.
# @param title_check_list List of titles that *must* be columns in the file.
# @return Two dicts: ({key:column}, {title:index}).
# @note WARNING: Values are returned as strings.
def csv2dict(file_name, title_check_list=None):
    """
    Load in the csv as a dictionary, title as key and column info as value.
    Also, create a dictionary, title as key and column index as value,
    to keep track of the column order.

    Two dictionaries are returned.

    WARNING: Values are returned as strings.
             Do this to change a list of strings to a list of floats
                 time = [float(x) for x in time]
    """

    # FIXME(Ole): Consider dealing with files without headers
    # FIXME(Ole): Consider a wrapper automatically converting text fields
    #             to the right type by trying for: int, float, string

    attribute_dic = {}
    title_index_dic = {}
    titles_stripped = [] # List of titles

    reader = csv.reader(file(file_name))

    # Read in and manipulate the title info
    titles = reader.next()
    for i, title in enumerate(titles):
        header = title.strip()
        titles_stripped.append(header)
        title_index_dic[header] = i
    title_count = len(titles_stripped)

    # Check required columns
    if title_check_list is not None:
        for title_check in title_check_list:
            if not title_index_dic.has_key(title_check):
                msg = 'Reading error. This column title is not present: %s' \
                      % title_check
                raise IOError, msg

    # Create a dictionary of column values, indexed by column title
    for line in reader:
        n = len(line) # Number of entries
        if n != title_count:
            msg = 'Entry in file %s had %d columns ' % (file_name, n)
            msg += 'although there were %d headers' % title_count
            raise IOError, msg
        for i, value in enumerate(line):
            attribute_dic.setdefault(titles_stripped[i], []).append(value)

    return attribute_dic, title_index_dic

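# A minimal illustration (hypothetical file 'gauges.csv' containing the two
# lines 'name,easting' and 'g1,3000.5'):
#
#     >>> attributes, title_indices = csv2dict('gauges.csv')
#     >>> attributes['easting']    # all values are strings
#     ['3000.5']
#     >>> title_indices['easting']
#     1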

##
# @brief Write x,y,z vectors to a file in OBJ format.
# @param filename Path of the obj file to write (extension optional).
# @param x
# @param y
# @param z
def write_obj(filename, x, y, z):
    """Store x,y,z vectors into filename (obj format).

       Vectors are assumed to have dimension (M,3) where
       M corresponds to the number of elements.
       Triangles are assumed to be disconnected.

       The three numbers in each vector correspond to three vertices,

       e.g. the x coordinate of vertex 1 of element i is in x[i,1]
    """

    import os.path

    root, ext = os.path.splitext(filename)
    if ext == '.obj':
        FN = filename
    else:
        FN = filename + '.obj'

    outfile = open(FN, 'wb')
    outfile.write("# Triangulation as an obj file\n")

    M, N = x.shape
    assert N == 3  # Assuming three vertices per element

    for i in range(M):
        for j in range(N):
            outfile.write("v %f %f %f\n" % (x[i,j], y[i,j], z[i,j]))

    for i in range(M):
        base = i * N
        outfile.write("f %d %d %d\n" % (base+1, base+2, base+3))

    outfile.close()

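# A small self-contained sketch writing one disconnected triangle
# (the filename 'triangle' is hypothetical):
#
#     >>> x = num.array([[0.0, 1.0, 0.0]])
#     >>> y = num.array([[0.0, 0.0, 1.0]])
#     >>> z = num.zeros((1, 3), num.float)
#     >>> write_obj('triangle', x, y, z)    # creates 'triangle.obj'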

########################################################
# Conversion routines
########################################################

##
# @brief Convert SWW data to OBJ data.
# @param basefilename Stem of filename, needs size and extension added.
# @param size The number of lines to write.
def sww2obj(basefilename, size):
    """Convert netcdf based data output to obj
    """

    from Scientific.IO.NetCDF import NetCDFFile

    # Get NetCDF
    FN = create_filename('.', basefilename, 'sww', size)
    print 'Reading from ', FN
    fid = NetCDFFile(FN, netcdf_mode_r)  # Open existing file for read

    # Get the variables
    x = fid.variables['x']
    y = fid.variables['y']
    z = fid.variables['elevation']
    time = fid.variables['time']
    stage = fid.variables['stage']

    M = size  # Number of lines
    xx = num.zeros((M,3), num.float)
    yy = num.zeros((M,3), num.float)
    zz = num.zeros((M,3), num.float)

    for i in range(M):
        for j in range(3):
            xx[i,j] = x[i+j*M]
            yy[i,j] = y[i+j*M]
            zz[i,j] = z[i+j*M]

    # Write obj for bathymetry
    FN = create_filename('.', basefilename, 'obj', size)
    write_obj(FN,xx,yy,zz)

    # Now read all the data with variable information, combine with
    # x,y info and store as obj
    for k in range(len(time)):
        t = time[k]
        print 'Processing timestep %f' % t

        for i in range(M):
            for j in range(3):
                zz[i,j] = stage[k,i+j*M]

        # Write obj for variable data
        #FN = create_filename(basefilename, 'obj', size, time=t)
        FN = create_filename('.', basefilename[:5], 'obj', size, time=t)
        write_obj(FN, xx, yy, zz)


##
# @brief Convert line based data output to obj format.
# @param basefilename Stem of filename, needs size and extension added.
def dat2obj(basefilename):
    """Convert line based data output to obj
    FIXME: Obsolete?
    """

    import glob, os
    from anuga.config import data_dir

    # Get bathymetry and x,y's
    lines = open(data_dir+os.sep+basefilename+'_geometry.dat', 'r').readlines()

    M = len(lines)  # Number of lines
    x = num.zeros((M,3), num.float)
    y = num.zeros((M,3), num.float)
    z = num.zeros((M,3), num.float)

    for i, line in enumerate(lines):
        tokens = line.split()
        values = map(float, tokens)

        for j in range(3):
            x[i,j] = values[j*3]
            y[i,j] = values[j*3+1]
            z[i,j] = values[j*3+2]

    # Write obj for bathymetry
    write_obj(data_dir + os.sep + basefilename + '_geometry', x, y, z)

    # Now read all the data files with variable information, combine with
    # x,y info and store as obj.

    files = glob.glob(data_dir + os.sep + basefilename + '*.dat')
    for filename in files:
        print 'Processing %s' % filename

        # Note: 'filename' from glob already includes data_dir
        lines = open(filename, 'r').readlines()
        assert len(lines) == M
        root, ext = os.path.splitext(filename)

        # Get time from filename
        i0 = filename.find('_time=')
        if i0 == -1:
            # Skip bathymetry file
            continue

        i0 += 6  # Position where time starts
        i1 = filename.find('.dat')

        if i1 > i0:
            t = float(filename[i0:i1])
        else:
            raise DataTimeError, 'Hmmmm'

        for i, line in enumerate(lines):
            tokens = line.split()
            values = map(float,tokens)

            for j in range(3):
                z[i,j] = values[j]

        # Write obj for variable data
        write_obj(data_dir + os.sep + basefilename + '_time=%.4f' % t, x, y, z)


##
# @brief Filter data file, selecting timesteps first:step:last.
# @param filename1 Data file to filter.
# @param filename2 File to write filtered timesteps to.
# @param first First timestep.
# @param last Last timestep.
# @param step Timestep stride.
def filter_netcdf(filename1, filename2, first=0, last=None, step=1):
    """Filter data file, selecting timesteps first:step:last.

    Read netcdf filename1, pick timesteps first:step:last and save to
    netcdf file filename2
    """

    from Scientific.IO.NetCDF import NetCDFFile

    # Get NetCDF
    infile = NetCDFFile(filename1, netcdf_mode_r)  # Open existing file for read
    outfile = NetCDFFile(filename2, netcdf_mode_w)  # Open new file

    # Copy dimensions
    for d in infile.dimensions:
        outfile.createDimension(d, infile.dimensions[d])

    # Copy variable definitions
    for name in infile.variables:
        var = infile.variables[name]
        outfile.createVariable(name, var.dtype.char, var.dimensions)    #??#

    # Copy the static variables
    for name in infile.variables:
        if name == 'time' or name == 'stage':
            pass
        else:
            outfile.variables[name][:] = infile.variables[name][:]

    # Copy selected timesteps
    time = infile.variables['time']
    stage = infile.variables['stage']

    newtime = outfile.variables['time']
    newstage = outfile.variables['stage']

    if last is None:
        last = len(time)

    selection = range(first, last, step)
    for i, j in enumerate(selection):
        print 'Copying timestep %d of %d (%f)' % (j, last-first, time[j])
        newtime[i] = time[j]
        newstage[i,:] = stage[j,:]

    # Close
    infile.close()
    outfile.close()

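# For example (hypothetical filenames), keeping every second timestep of
# the first 100:
#
#     filter_netcdf('full.sww', 'decimated.sww', first=0, last=100, step=2)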

##
# @brief Return instance of class of given format using filename.
# @param domain Domain instance whose 'format' attribute selects the class.
# @param mode The mode to open domain in.
# @return A class instance of required domain and mode.
# Get data objects
def get_dataobject(domain, mode=netcdf_mode_w):
    """Return instance of class of given format using filename
    """

    cls = eval('Data_format_%s' % domain.format)
    return cls(domain, mode)


##
# @brief Convert DEM data to PTS data.
# @param basename_in Stem of input filename.
# @param basename_out Stem of output filename.
# @param easting_min
# @param easting_max
# @param northing_min
# @param northing_max
# @param use_cache
# @param verbose
# @return
def dem2pts(basename_in, basename_out=None,
            easting_min=None, easting_max=None,
            northing_min=None, northing_max=None,
            use_cache=False, verbose=False):
    """Read Digital Elevation model from the following NetCDF format (.dem)

    Example:

    ncols         3121
    nrows         1800
    xllcorner     722000
    yllcorner     5893000
    cellsize      25
    NODATA_value  -9999
    138.3698 137.4194 136.5062 135.5558 ..........

    Convert to NetCDF pts format which is

    points:  (Nx2) float array
    elevation: N float array
    """

    kwargs = {'basename_out': basename_out,
              'easting_min': easting_min,
              'easting_max': easting_max,
              'northing_min': northing_min,
              'northing_max': northing_max,
              'verbose': verbose}

    if use_cache is True:
        from caching import cache
        result = cache(_dem2pts, basename_in, kwargs,
                       dependencies = [basename_in + '.dem'],
                       verbose = verbose)

    else:
        result = apply(_dem2pts, [basename_in], kwargs)

    return result

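# Typical usage (hypothetical stem 'topography', expecting the file
# 'topography.dem' to exist), clipping to a bounding box:
#
#     dem2pts('topography', basename_out='topography_clipped',
#             easting_min=722500, easting_max=723000,
#             northing_min=5893500, northing_max=5894000,
#             verbose=True)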
1530
1531##
1532# @brief
1533# @param basename_in
1534# @param basename_out
1535# @param verbose
1536# @param easting_min
1537# @param easting_max
1538# @param northing_min
1539# @param northing_max
1540def _dem2pts(basename_in, basename_out=None, verbose=False,
1541            easting_min=None, easting_max=None,
1542            northing_min=None, northing_max=None):
1543    """Read Digitial Elevation model from the following NetCDF format (.dem)
1544
1545    Internal function. See public function dem2pts for details.
1546    """
1547
1548    # FIXME: Can this be written feasibly using write_pts?
1549
1550    import os
1551    from Scientific.IO.NetCDF import NetCDFFile
1552
1553    root = basename_in
1554
1555    # Get NetCDF
1556    infile = NetCDFFile(root + '.dem', netcdf_mode_r) 
1557
1558    if verbose: print 'Reading DEM from %s' %(root + '.dem')
1559
1560    ncols = infile.ncols[0]
1561    nrows = infile.nrows[0]
1562    xllcorner = infile.xllcorner[0]  # Easting of lower left corner
1563    yllcorner = infile.yllcorner[0]  # Northing of lower left corner
1564    cellsize = infile.cellsize[0]
1565    NODATA_value = infile.NODATA_value[0]
1566    dem_elevation = infile.variables['elevation']
1567
1568    zone = infile.zone[0]
1569    false_easting = infile.false_easting[0]
1570    false_northing = infile.false_northing[0]
1571
1572    # Text strings
1573    projection = infile.projection
1574    datum = infile.datum
1575    units = infile.units
1576
1577    # Get output file
    if basename_out is None:
1579        ptsname = root + '.pts'
1580    else:
1581        ptsname = basename_out + '.pts'
1582
1583    if verbose: print 'Store to NetCDF file %s' %ptsname
1584
1585    # NetCDF file definition
1586    outfile = NetCDFFile(ptsname, netcdf_mode_w)
1587
1588    # Create new file
1589    outfile.institution = 'Geoscience Australia'
1590    outfile.description = 'NetCDF pts format for compact and portable ' \
1591                          'storage of spatial point data'
1592
1593    # Assign default values
1594    if easting_min is None: easting_min = xllcorner
1595    if easting_max is None: easting_max = xllcorner + ncols*cellsize
1596    if northing_min is None: northing_min = yllcorner
1597    if northing_max is None: northing_max = yllcorner + nrows*cellsize
1598
1599    # Compute offsets to update georeferencing
1600    easting_offset = xllcorner - easting_min
1601    northing_offset = yllcorner - northing_min
1602
1603    # Georeferencing
1604    outfile.zone = zone
1605    outfile.xllcorner = easting_min # Easting of lower left corner
1606    outfile.yllcorner = northing_min # Northing of lower left corner
1607    outfile.false_easting = false_easting
1608    outfile.false_northing = false_northing
1609
1610    outfile.projection = projection
1611    outfile.datum = datum
1612    outfile.units = units
1613
1614    # Grid info (FIXME: probably not going to be used, but heck)
1615    outfile.ncols = ncols
1616    outfile.nrows = nrows
1617
1618    dem_elevation_r = num.reshape(dem_elevation, (nrows, ncols))
1619    totalnopoints = nrows*ncols
1620
1621    # Calculating number of NODATA_values for each row in clipped region
1622    # FIXME: use array operations to do faster
1623    nn = 0
1624    k = 0
1625    i1_0 = 0
1626    j1_0 = 0
1627    thisj = 0
1628    thisi = 0
1629    for i in range(nrows):
1630        y = (nrows-i-1)*cellsize + yllcorner
1631        for j in range(ncols):
1632            x = j*cellsize + xllcorner
1633            if easting_min <= x <= easting_max \
1634               and northing_min <= y <= northing_max:
1635                thisj = j
1636                thisi = i
1637                if dem_elevation_r[i,j] == NODATA_value:
1638                    nn += 1
1639
1640                if k == 0:
1641                    i1_0 = i
1642                    j1_0 = j
1643
1644                k += 1
1645
1646    index1 = j1_0
1647    index2 = thisj
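    # (index1, index2) are the first and last column indices of the clipped
    # region, while (i1_0, thisi) are the corresponding row indices.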
1648
1649    # Dimension definitions
1650    nrows_in_bounding_box = int(round((northing_max-northing_min)/cellsize))
1651    ncols_in_bounding_box = int(round((easting_max-easting_min)/cellsize))
1652
1653    clippednopoints = (thisi+1-i1_0)*(thisj+1-j1_0)
1654    nopoints = clippednopoints-nn
1655
1656    clipped_dem_elev = dem_elevation_r[i1_0:thisi+1,j1_0:thisj+1]
1657
1658    if verbose:
1659        print 'There are %d values in the elevation' %totalnopoints
1660        print 'There are %d values in the clipped elevation' %clippednopoints
1661        print 'There are %d NODATA_values in the clipped elevation' %nn
1662
1663    outfile.createDimension('number_of_points', nopoints)
1664    outfile.createDimension('number_of_dimensions', 2) #This is 2d data
1665
1666    # Variable definitions
1667    outfile.createVariable('points', netcdf_float, ('number_of_points',
1668                                                    'number_of_dimensions'))
1669    outfile.createVariable('elevation', netcdf_float, ('number_of_points',))
1670
1671    # Get handles to the variables
1672    points = outfile.variables['points']
1673    elevation = outfile.variables['elevation']
1674
1675    lenv = index2-index1+1
1676
1677    # Store data
1678    global_index = 0
1679    # for i in range(nrows):
1680    for i in range(i1_0, thisi+1, 1):
1681        if verbose and i % ((nrows+10)/10) == 0:
1682            print 'Processing row %d of %d' % (i, nrows)
1683
1684        lower_index = global_index
1685
1686        v = dem_elevation_r[i,index1:index2+1]
1687        no_NODATA = num.sum(v == NODATA_value)
1688        if no_NODATA > 0:
1689            newcols = lenv - no_NODATA  # ncols_in_bounding_box - no_NODATA
1690        else:
1691            newcols = lenv              # ncols_in_bounding_box
1692
1693        telev = num.zeros(newcols, num.float)
1694        tpoints = num.zeros((newcols, 2), num.float)
1695
1696        local_index = 0
1697
1698        y = (nrows-i-1)*cellsize + yllcorner
1699        #for j in range(ncols):
1700        for j in range(j1_0,index2+1,1):
1701            x = j*cellsize + xllcorner
1702            if easting_min <= x <= easting_max \
1703               and northing_min <= y <= northing_max \
               and dem_elevation_r[i,j] != NODATA_value:
1705                tpoints[local_index, :] = [x-easting_min, y-northing_min]
1706                telev[local_index] = dem_elevation_r[i, j]
1707                global_index += 1
1708                local_index += 1
1709
1710        upper_index = global_index
1711
1712        if upper_index == lower_index + newcols:
1713            points[lower_index:upper_index, :] = tpoints
1714            elevation[lower_index:upper_index] = telev
1715
1716    assert global_index == nopoints, 'index not equal to number of points'
1717
1718    infile.close()
1719    outfile.close()
1720
1721
1722##
# @brief Return block of surface lines for each cross section.
# @param lines Iterable of text lines to process.
1725# @note BROKEN?  UNUSED?
1726def _read_hecras_cross_sections(lines):
1727    """Return block of surface lines for each cross section
1728    Starts with SURFACE LINE,
1729    Ends with END CROSS-SECTION
1730    """
1731
1732    points = []
1733
1734    reading_surface = False
1735    for i, line in enumerate(lines):
1736        if len(line.strip()) == 0:    #Ignore blanks
1737            continue
1738
        if line.strip().startswith('SURFACE LINE'):
            reading_surface = True
            continue

        if line.strip().startswith('END') and reading_surface:
1744            yield points
1745            reading_surface = False
1746            points = []
1747
1748        if reading_surface:
1749            fields = line.strip().split(',')
1750            easting = float(fields[0])
1751            northing = float(fields[1])
1752            elevation = float(fields[2])
1753            points.append([easting, northing, elevation])
1754
1755
1756##
1757# @brief Convert HECRAS (.sdf) file to PTS file.
# @param basename_in Stem of input filename.
# @param basename_out Stem of output filename.
1760# @param verbose True if this function is to be verbose.
1761def hecras_cross_sections2pts(basename_in,
1762                              basename_out=None,
1763                              verbose=False):
1764    """Read HEC-RAS Elevation datal from the following ASCII format (.sdf)
1765
1766    Example:
1767
1768# RAS export file created on Mon 15Aug2005 11:42
1769# by HEC-RAS Version 3.1.1
1770
1771BEGIN HEADER:
1772  UNITS: METRIC
1773  DTM TYPE: TIN
1774  DTM: v:\1\cit\perth_topo\river_tin
1775  STREAM LAYER: c:\local\hecras\21_02_03\up_canning_cent3d.shp
1776  CROSS-SECTION LAYER: c:\local\hecras\21_02_03\up_can_xs3d.shp
1777  MAP PROJECTION: UTM
1778  PROJECTION ZONE: 50
1779  DATUM: AGD66
1780  VERTICAL DATUM:
1781  NUMBER OF REACHES:  19
1782  NUMBER OF CROSS-SECTIONS:  14206
1783END HEADER:
1784
1785Only the SURFACE LINE data of the following form will be utilised
1786  CROSS-SECTION:
1787    STREAM ID:Southern-Wungong
1788    REACH ID:Southern-Wungong
1789    STATION:19040.*
1790    CUT LINE:
1791      405548.671603161 , 6438142.7594925
1792      405734.536092045 , 6438326.10404912
1793      405745.130459356 , 6438331.48627354
1794      405813.89633823 , 6438368.6272789
1795    SURFACE LINE:
1796     405548.67,   6438142.76,   35.37
1797     405552.24,   6438146.28,   35.41
1798     405554.78,   6438148.78,   35.44
1799     405555.80,   6438149.79,   35.44
1800     405559.37,   6438153.31,   35.45
1801     405560.88,   6438154.81,   35.44
1802     405562.93,   6438156.83,   35.42
1803     405566.50,   6438160.35,   35.38
1804     405566.99,   6438160.83,   35.37
1805     ...
1806   END CROSS-SECTION
1807
1808    Convert to NetCDF pts format which is
1809
1810    points:  (Nx2) float array
1811    elevation: N float array
1812    """
1813
1814    import os
1815    from Scientific.IO.NetCDF import NetCDFFile
1816
1817    root = basename_in
1818
1819    # Get ASCII file
1820    infile = open(root + '.sdf', 'r')
1821
1822    if verbose: print 'Reading DEM from %s' %(root + '.sdf')
1823
1824    lines = infile.readlines()
1825    infile.close()
1826
1827    if verbose: print 'Converting to pts format'
1828
1829    # Scan through the header, picking up stuff we need.
1830    i = 0
1831    while lines[i].strip() == '' or lines[i].strip().startswith('#'):
1832        i += 1
1833
1834    assert lines[i].strip().upper() == 'BEGIN HEADER:'
1835    i += 1
1836
1837    assert lines[i].strip().upper().startswith('UNITS:')
1838    units = lines[i].strip().split()[1]
1839    i += 1
1840
1841    assert lines[i].strip().upper().startswith('DTM TYPE:')
1842    i += 1
1843
1844    assert lines[i].strip().upper().startswith('DTM:')
1845    i += 1
1846
1847    assert lines[i].strip().upper().startswith('STREAM')
1848    i += 1
1849
1850    assert lines[i].strip().upper().startswith('CROSS')
1851    i += 1
1852
1853    assert lines[i].strip().upper().startswith('MAP PROJECTION:')
1854    projection = lines[i].strip().split(':')[1]
1855    i += 1
1856
1857    assert lines[i].strip().upper().startswith('PROJECTION ZONE:')
1858    zone = int(lines[i].strip().split(':')[1])
1859    i += 1
1860
1861    assert lines[i].strip().upper().startswith('DATUM:')
1862    datum = lines[i].strip().split(':')[1]
1863    i += 1
1864
1865    assert lines[i].strip().upper().startswith('VERTICAL DATUM:')
1866    i += 1
1867
1868    assert lines[i].strip().upper().startswith('NUMBER OF REACHES:')
1869    i += 1
1870
1871    assert lines[i].strip().upper().startswith('NUMBER OF CROSS-SECTIONS:')
1872    number_of_cross_sections = int(lines[i].strip().split(':')[1])
1873    i += 1
1874
    # Now read all points
    points = []
    elevation = []
    j = -1    # In case the file contains no cross sections
    for j, entries in enumerate(_read_hecras_cross_sections(lines[i:])):
1879        for k, entry in enumerate(entries):
1880            points.append(entry[:2])
1881            elevation.append(entry[2])
1882
    msg = 'Actual number of cross sections %d does not match reported ' \
          'number %d' % (j+1, number_of_cross_sections)
1885    assert j+1 == number_of_cross_sections, msg
1886
1887    # Get output file, write PTS data
    if basename_out is None:
1889        ptsname = root + '.pts'
1890    else:
1891        ptsname = basename_out + '.pts'
1892
1893    geo_ref = Geo_reference(zone, 0, 0, datum, projection, units)
1894    geo = Geospatial_data(points, {"elevation":elevation},
1895                          verbose=verbose, geo_reference=geo_ref)
1896    geo.export_points_file(ptsname)
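
# Usage sketch for hecras_cross_sections2pts (hypothetical filename;
# 'perth.sdf' is a HEC-RAS export of the form shown in the docstring):
#
#     hecras_cross_sections2pts('perth', verbose=True)   # writes perth.pts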
1897
1898
1899##
# @brief Wrapper for sww2dem - export grid files for multiple SWW files.
1901# @param basename_in
1902# @param extra_name_out
1903# @param quantities
1904# @param timestep
1905# @param reduction
1906# @param cellsize
1907# @param number_of_decimal_places
1908# @param NODATA_value
1909# @param easting_min
1910# @param easting_max
1911# @param northing_min
1912# @param northing_max
1913# @param verbose
1914# @param origin
1915# @param datum
1916# @param format
# @return List of basenames of the files produced.
1918def export_grid(basename_in, extra_name_out=None,
1919                quantities=None, # defaults to elevation
1920                timestep=None,
1921                reduction=None,
1922                cellsize=10,
1923                number_of_decimal_places=None,
1924                NODATA_value=-9999,
1925                easting_min=None,
1926                easting_max=None,
1927                northing_min=None,
1928                northing_max=None,
1929                verbose=False,
1930                origin=None,
1931                datum='WGS84',
1932                format='ers'):
1933    """Wrapper for sww2dem.
1934    See sww2dem to find out what most of the parameters do.
1935
1936    Quantities is a list of quantities.  Each quantity will be
1937    calculated for each sww file.
1938
1939    This returns the basenames of the files returned, which is made up
1940    of the dir and all of the file name, except the extension.
1941
1942    This function returns the names of the files produced.
1943
1944    It will also produce as many output files as there are input sww files.
1945    """
1946
1947    if quantities is None:
1948        quantities = ['elevation']
1949
    if type(quantities) is str:
        quantities = [quantities]
1952
1953    # How many sww files are there?
1954    dir, base = os.path.split(basename_in)
1955
1956    iterate_over = get_all_swwfiles(dir, base, verbose)
1957
1958    if dir == "":
1959        dir = "." # Unix compatibility
1960
1961    files_out = []
1962    for sww_file in iterate_over:
1963        for quantity in quantities:
1964            if extra_name_out is None:
1965                basename_out = sww_file + '_' + quantity
1966            else:
1967                basename_out = sww_file + '_' + quantity + '_' + extra_name_out
1968
1969            file_out = sww2dem(dir+sep+sww_file, dir+sep+basename_out,
1970                               quantity,
1971                               timestep,
1972                               reduction,
1973                               cellsize,
1974                               number_of_decimal_places,
1975                               NODATA_value,
1976                               easting_min,
1977                               easting_max,
1978                               northing_min,
1979                               northing_max,
1980                               verbose,
1981                               origin,
1982                               datum,
1983                               format)
1984            files_out.append(file_out)
1985    return files_out
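
# Usage sketch for export_grid (hypothetical filenames; one or more files
# matching 'scenario*.sww' must exist in the current directory):
#
#     files = export_grid('scenario',
#                         quantities=['elevation', 'stage'],
#                         cellsize=25, format='asc', verbose=True)
#     # returns e.g. ['./scenario_elevation', './scenario_stage']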
1986
1987
1988##
1989# @brief
1990# @param production_dirs
1991# @param output_dir
1992# @param scenario_name
1993# @param gauges_dir_name
1994# @param plot_quantity
1995# @param generate_fig
1996# @param reportname
1997# @param surface
1998# @param time_min
1999# @param time_max
2000# @param title_on
2001# @param verbose
2002# @param nodes
2003def get_timeseries(production_dirs, output_dir, scenario_name, gauges_dir_name,
2004                   plot_quantity, generate_fig=False,
2005                   reportname=None, surface=False, time_min=None,
2006                   time_max=None, title_on=False, verbose=True,
2007                   nodes=None):
2008    """
2009    nodes - number of processes used.
2010
2011    warning - this function has no tests
2012    """
2013
    report = reportname is not None
    is_parallel = nodes is not None
2023
    # Generate figures
    swwfiles = {}
    if is_parallel:
        base_reportname = reportname
        for i in range(nodes):
            print 'Sending node %d of %d' % (i, nodes)
            swwfiles = {}
            if base_reportname is not None:
                reportname = base_reportname + '_%s' % i
2032            for label_id in production_dirs.keys():
2033                if label_id == 'boundaries':
2034                    swwfile = best_boundary_sww
2035                else:
2036                    file_loc = output_dir + label_id + sep
2037                    sww_extra = '_P%s_%s' % (i, nodes)
2038                    swwfile = file_loc + scenario_name + sww_extra + '.sww'
2039                    print 'swwfile', swwfile
2040                    swwfiles[swwfile] = label_id
2041
            texname, elev_output = sww2timeseries(swwfiles,
                                                  gauges_dir_name,
                                                  production_dirs,
                                                  report=report,
                                                  reportname=reportname,
                                                  plot_quantity=plot_quantity,
                                                  generate_fig=generate_fig,
                                                  surface=surface,
                                                  time_min=time_min,
                                                  time_max=time_max,
                                                  title_on=title_on,
                                                  verbose=verbose)
2054    else:
2055        for label_id in production_dirs.keys():
2056            if label_id == 'boundaries':
2057                print 'boundaries'
2058                file_loc = project.boundaries_in_dir
2059                swwfile = project.boundaries_dir_name3 + '.sww'
2060                #  swwfile = boundary_dir_filename
2061            else:
2062                file_loc = output_dir + label_id + sep
2063                swwfile = file_loc + scenario_name + '.sww'
2064            swwfiles[swwfile] = label_id
2065
2066        texname, elev_output = sww2timeseries(swwfiles,
2067                                              gauges_dir_name,
2068                                              production_dirs,
2069                                              report=report,
2070                                              reportname=reportname,
2071                                              plot_quantity=plot_quantity,
2072                                              generate_fig=generate_fig,
2073                                              surface=surface,
2074                                              time_min=time_min,
2075                                              time_max=time_max,
2076                                              title_on=title_on,
2077                                              verbose=verbose)
2078
2079
2080##
# @brief Convert SWW file to DEM file (.asc or .ers).
# @param basename_in Stem of input filename.
# @param basename_out Stem of output filename.
# @param quantity Name or expression of the quantity to export (def: elevation).
# @param timestep If given, export quantity at that timestep only.
# @param reduction Function used to reduce quantity over timesteps (def: max).
# @param cellsize Grid cell size in metres (default: 10).
# @param number_of_decimal_places Number of decimal places written (default: 3).
# @param NODATA_value The NODATA value (default: -9999).
# @param easting_min Minimum easting of the output grid.
# @param easting_max Maximum easting of the output grid.
# @param northing_min Minimum northing of the output grid.
# @param northing_max Maximum northing of the output grid.
# @param verbose True if this function is to be verbose.
# @param origin Tuple (zone, easting, northing) overriding the SWW georeference.
# @param datum Datum string written to the output header (default: 'WGS84').
# @param format Output format, either 'asc' or 'ers' (default: 'ers').
# @param block_size Number of slices along the non-time axis per block.
# @return Basename of the file(s) produced.
2099def sww2dem(basename_in, basename_out=None,
2100            quantity=None, # defaults to elevation
2101            timestep=None,
2102            reduction=None,
2103            cellsize=10,
2104            number_of_decimal_places=None,
2105            NODATA_value=-9999,
2106            easting_min=None,
2107            easting_max=None,
2108            northing_min=None,
2109            northing_max=None,
2110            verbose=False,
2111            origin=None,
2112            datum='WGS84',
2113            format='ers',
2114            block_size=None):
2115    """Read SWW file and convert to Digitial Elevation model format
2116    (.asc or .ers)
2117
2118    Example (ASC):
2119    ncols         3121
2120    nrows         1800
2121    xllcorner     722000
2122    yllcorner     5893000
2123    cellsize      25
2124    NODATA_value  -9999
2125    138.3698 137.4194 136.5062 135.5558 ..........
2126
    The number of decimal places can be specified by the user to reduce
    disk space requirements.
2129
    Also write an accompanying file with the same basename but extension .prj,
    used to fix the UTM zone, datum, false northings and eastings.
2132
2133    The prj format is assumed to be as
2134
2135    Projection    UTM
2136    Zone          56
2137    Datum         WGS84
2138    Zunits        NO
2139    Units         METERS
2140    Spheroid      WGS84
2141    Xshift        0.0000000000
2142    Yshift        10000000.0000000000
2143    Parameters
2144
2145
2146    The parameter quantity must be the name of an existing quantity or
2147    an expression involving existing quantities. The default is
2148    'elevation'. Quantity is not a list of quantities.
2149
    If timestep (an index) is given, output quantity at that timestep.

    If reduction is given, use that to reduce quantity over all timesteps.

    datum sets the datum string written to the output (default 'WGS84').

    format can be either 'asc' or 'ers'.
2157    block_size - sets the number of slices along the non-time axis to
2158                 process in one block.
2159    """
2160
2161    import sys
2162
2163    from anuga.utilities.polygon import inside_polygon, outside_polygon, \
2164         separate_points_by_polygon
2165    from anuga.abstract_2d_finite_volumes.util import \
2166         apply_expression_to_dictionary
2167
2168    msg = 'Format must be either asc or ers'
2169    assert format.lower() in ['asc', 'ers'], msg
2170
2171    false_easting = 500000
2172    false_northing = 10000000
2173
2174    if quantity is None:
2175        quantity = 'elevation'
2176
2177    if reduction is None:
2178        reduction = max
2179
2180    if basename_out is None:
2181        basename_out = basename_in + '_%s' % quantity
2182
2183    if quantity_formula.has_key(quantity):
2184        quantity = quantity_formula[quantity]
2185
2186    if number_of_decimal_places is None:
2187        number_of_decimal_places = 3
2188
2189    if block_size is None:
2190        block_size = DEFAULT_BLOCK_SIZE
2191
2192    # Read SWW file
2193    swwfile = basename_in + '.sww'
2194    demfile = basename_out + '.' + format
    # Note the use of a .ers extension is optional (write_ermapper_grid will
    # deal with either option)
2197
2198    # Read sww file
2199    if verbose:
2200        print 'Reading from %s' % swwfile
2201        print 'Output directory is %s' % basename_out
2202
2203    from Scientific.IO.NetCDF import NetCDFFile
2204    fid = NetCDFFile(swwfile)
2205
2206    #Get extent and reference
2207    x = fid.variables['x'][:]
2208    y = fid.variables['y'][:]
2209    volumes = fid.variables['volumes'][:]
2210    if timestep is not None:
2211        times = fid.variables['time'][timestep]
2212    else:
2213        times = fid.variables['time'][:]
2214
2215    number_of_timesteps = fid.dimensions['number_of_timesteps']
2216    number_of_points = fid.dimensions['number_of_points']
2217
2218    if origin is None:
2219        # Get geo_reference
2220        # sww files don't have to have a geo_ref
2221        try:
2222            geo_reference = Geo_reference(NetCDFObject=fid)
2223        except AttributeError, e:
2224            geo_reference = Geo_reference() # Default georef object
2225
2226        xllcorner = geo_reference.get_xllcorner()
2227        yllcorner = geo_reference.get_yllcorner()
2228        zone = geo_reference.get_zone()
2229    else:
2230        zone = origin[0]
2231        xllcorner = origin[1]
2232        yllcorner = origin[2]
2233
2234    # FIXME: Refactor using code from Interpolation_function.statistics
2235    # (in interpolate.py)
2236    # Something like print swwstats(swwname)
2237    if verbose:
2238        print '------------------------------------------------'
2239        print 'Statistics of SWW file:'
2240        print '  Name: %s' %swwfile
2241        print '  Reference:'
2242        print '    Lower left corner: [%f, %f]'\
2243              %(xllcorner, yllcorner)
2244        if timestep is not None:
2245            print '    Time: %f' %(times)
2246        else:
2247            print '    Start time: %f' %fid.starttime[0]
2248        print '  Extent:'
2249        print '    x [m] in [%f, %f], len(x) == %d'\
2250              %(num.min(x), num.max(x), len(x.flat))
2251        print '    y [m] in [%f, %f], len(y) == %d'\
2252              %(num.min(y), num.max(y), len(y.flat))
2253        if timestep is not None:
2254            print '    t [s] = %f, len(t) == %d' %(times, 1)
2255        else:
2256            print '    t [s] in [%f, %f], len(t) == %d'\
2257              %(min(times), max(times), len(times))
2258        print '  Quantities [SI units]:'
2259        # Comment out for reduced memory consumption
        for name in ['stage', 'xmomentum', 'ymomentum']:
            q = fid.variables[name][:].flatten()
            if timestep is not None:
                q = q[timestep*len(x):(timestep+1)*len(x)]
            print '    %s in [%f, %f]' % (name, min(q), max(q))
        for name in ['elevation']:
            q = fid.variables[name][:].flatten()
            print '    %s in [%f, %f]' % (name, min(q), max(q))
2268
2269    # Get the variables in the supplied expression.
2270    # This may throw a SyntaxError exception.
2271    var_list = get_vars_in_expression(quantity)
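    # For example, quantity = 'stage - elevation' (i.e. water depth) yields
    # var_list == ['stage', 'elevation'].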
2272
2273    # Check that we have the required variables in the SWW file.
    missing_vars = []
    for name in var_list:
        try:
            _ = fid.variables[name]
        except:
            missing_vars.append(name)
    if missing_vars:
        msg = ("In expression '%s', variables %s are not in the SWW file '%s'"
               % (quantity, missing_vars, swwfile))
        raise Exception, msg
2284
2285    # Create result array and start filling, block by block.
2286    result = num.zeros(number_of_points, num.float)
2287
2288    for start_slice in xrange(0, number_of_points, block_size):
2289        # limit slice size to array end if at last block
2290        end_slice = min(start_slice + block_size, number_of_points)
2291       
2292        # get slices of all required variables
2293        q_dict = {}
2294        for name in var_list:
2295            # check if variable has time axis
2296            if len(fid.variables[name].shape) == 2:
2297                q_dict[name] = fid.variables[name][:,start_slice:end_slice]
2298            else:       # no time axis
2299                q_dict[name] = fid.variables[name][start_slice:end_slice]
2300
2301        # Evaluate expression with quantities found in SWW file
2302        res = apply_expression_to_dictionary(quantity, q_dict)
2303
2304        if len(res.shape) == 2:
2305            new_res = num.zeros(res.shape[1], num.float)
2306            for k in xrange(res.shape[1]):
2307                new_res[k] = reduction(res[:,k])
2308            res = new_res
2309
2310        result[start_slice:end_slice] = res
2311                                   
2312    #Post condition: Now q has dimension: number_of_points
2313    assert len(result.shape) == 1
2314    assert result.shape[0] == number_of_points
2315
2316    if verbose:
2317        print 'Processed values for %s are in [%f, %f]' % \
2318              (quantity, min(result), max(result))
2319
2320    #Create grid and update xll/yll corner and x,y
2321    #Relative extent
2322    if easting_min is None:
2323        xmin = min(x)
2324    else:
2325        xmin = easting_min - xllcorner
2326
2327    if easting_max is None:
2328        xmax = max(x)
2329    else:
2330        xmax = easting_max - xllcorner
2331
2332    if northing_min is None:
2333        ymin = min(y)
2334    else:
2335        ymin = northing_min - yllcorner
2336
2337    if northing_max is None:
2338        ymax = max(y)
2339    else:
2340        ymax = northing_max - yllcorner
2341
2342    msg = 'xmax must be greater than or equal to xmin.\n'
2343    msg += 'I got xmin = %f, xmax = %f' %(xmin, xmax)
2344    assert xmax >= xmin, msg
2345
    msg = 'ymax must be greater than or equal to ymin.\n'
2347    msg += 'I got ymin = %f, ymax = %f' %(ymin, ymax)
2348    assert ymax >= ymin, msg
2349
2350    if verbose: print 'Creating grid'
2351    ncols = int((xmax-xmin)/cellsize) + 1
2352    nrows = int((ymax-ymin)/cellsize) + 1
2353
2354    #New absolute reference and coordinates
2355    newxllcorner = xmin + xllcorner
2356    newyllcorner = ymin + yllcorner
2357
2358    x = x + xllcorner - newxllcorner
2359    y = y + yllcorner - newyllcorner
2360
    vertex_points = num.concatenate((x[:,num.newaxis], y[:,num.newaxis]), axis=1)
2362    assert len(vertex_points.shape) == 2
2363
    grid_points = num.zeros((ncols*nrows, 2), num.float)
2365
2366    for i in xrange(nrows):
2367        if format.lower() == 'asc':
2368            yg = i * cellsize
        else:
            # This will flip the order of the y values for ers
            yg = (nrows-i) * cellsize
2372
2373        for j in xrange(ncols):
2374            xg = j * cellsize
2375            k = i*ncols + j
2376
2377            grid_points[k, 0] = xg
2378            grid_points[k, 1] = yg
2379
2380    #Interpolate
2381    from anuga.fit_interpolate.interpolate import Interpolate
2382
2383    # Remove loners from vertex_points, volumes here
2384    vertex_points, volumes = remove_lone_verts(vertex_points, volumes)
2387    interp = Interpolate(vertex_points, volumes, verbose = verbose)
2388
2389    #Interpolate using quantity values
2390    if verbose: print 'Interpolating'
2391    grid_values = interp.interpolate(result, grid_points).flatten()
2392
2393    if verbose:
2394        print 'Interpolated values are in [%f, %f]' %(num.min(grid_values),
2395                                                      num.max(grid_values))
2396
2397    #Assign NODATA_value to all points outside bounding polygon (from interpolation mesh)
2398    P = interp.mesh.get_boundary_polygon()
2399    outside_indices = outside_polygon(grid_points, P, closed=True)
2400
2401    for i in outside_indices:
2402        grid_values[i] = NODATA_value
2403
2404    if format.lower() == 'ers':
2405        # setup ERS header information
2406        grid_values = num.reshape(grid_values, (nrows, ncols))
2407        header = {}
2408        header['datum'] = '"' + datum + '"'
2409        # FIXME The use of hardwired UTM and zone number needs to be made optional
2410        # FIXME Also need an automatic test for coordinate type (i.e. EN or LL)
2411        header['projection'] = '"UTM-' + str(zone) + '"'
2412        header['coordinatetype'] = 'EN'
2413        if header['coordinatetype'] == 'LL':
2414            header['longitude'] = str(newxllcorner)
2415            header['latitude'] = str(newyllcorner)
2416        elif header['coordinatetype'] == 'EN':
2417            header['eastings'] = str(newxllcorner)
2418            header['northings'] = str(newyllcorner)
2419        header['nullcellvalue'] = str(NODATA_value)
2420        header['xdimension'] = str(cellsize)
2421        header['ydimension'] = str(cellsize)
2422        header['value'] = '"' + quantity + '"'
2423        #header['celltype'] = 'IEEE8ByteReal'  #FIXME: Breaks unit test
2424
2425        #Write
2426        if verbose: print 'Writing %s' %demfile
2427
2428        import ermapper_grids
2429
2430        ermapper_grids.write_ermapper_grid(demfile, grid_values, header)
2431
2432        fid.close()
2433    else:
2434        #Write to Ascii format
2435        #Write prj file
2436        prjfile = basename_out + '.prj'
2437
2438        if verbose: print 'Writing %s' %prjfile
2439        prjid = open(prjfile, 'w')
2440        prjid.write('Projection    %s\n' %'UTM')
2441        prjid.write('Zone          %d\n' %zone)
2442        prjid.write('Datum         %s\n' %datum)
2443        prjid.write('Zunits        NO\n')
2444        prjid.write('Units         METERS\n')
2445        prjid.write('Spheroid      %s\n' %datum)
2446        prjid.write('Xshift        %d\n' %false_easting)
2447        prjid.write('Yshift        %d\n' %false_northing)
2448        prjid.write('Parameters\n')
2449        prjid.close()
2450
2451        if verbose: print 'Writing %s' %demfile
2452
2453        ascid = open(demfile, 'w')
2454
2455        ascid.write('ncols         %d\n' %ncols)
2456        ascid.write('nrows         %d\n' %nrows)
2457        ascid.write('xllcorner     %d\n' %newxllcorner)
2458        ascid.write('yllcorner     %d\n' %newyllcorner)
2459        ascid.write('cellsize      %f\n' %cellsize)
2460        ascid.write('NODATA_value  %d\n' %NODATA_value)
2461
2462        #Get bounding polygon from mesh
2463        #P = interp.mesh.get_boundary_polygon()
2464        #inside_indices = inside_polygon(grid_points, P)
2465        for i in range(nrows):
2466            if verbose and i % ((nrows+10)/10) == 0:
2467                print 'Doing row %d of %d' %(i, nrows)
2468
2469            base_index = (nrows-i-1)*ncols
2470
2471            slice = grid_values[base_index:base_index+ncols]
2472            #s = array2string(slice, max_line_width=sys.maxint)
2473            s = num.array2string(slice, max_line_width=sys.maxint,
2474                                 precision=number_of_decimal_places)
2475            ascid.write(s[1:-1] + '\n')
2476
2477        #Close
2478        ascid.close()
2479        fid.close()
2480
    return basename_out
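
# Usage sketch for sww2dem (hypothetical filenames; 'scenario.sww' must exist):
#
#     sww2dem('scenario', basename_out='scenario_depth',
#             quantity='stage - elevation',   # expressions are allowed
#             reduction=max,                  # reduce over all timesteps
#             cellsize=25, format='asc', verbose=True)
#     # writes scenario_depth.asc and scenario_depth.prj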
2482
2483################################################################################
# Obsolete functions - maintained for backwards compatibility
2485################################################################################
2486
2487##
2488# @brief
2489# @param basename_in
2490# @param basename_out
2491# @param quantity
2492# @param timestep
2493# @param reduction
# @param cellsize
# @param number_of_decimal_places
# @param verbose
# @param origin
# @note OBSOLETE - use sww2dem() instead.
def sww2asc(basename_in, basename_out = None,
            quantity = None,
            timestep = None,
            reduction = None,
            cellsize = 10,
            number_of_decimal_places = None,
            verbose = False,
            origin = None):
    print 'sww2asc will soon be obsoleted - please use sww2dem'
    sww2dem(basename_in,
            basename_out = basename_out,
            quantity = quantity,
            timestep = timestep,
            reduction = reduction,
            cellsize = cellsize,
            number_of_decimal_places = number_of_decimal_places,
            verbose = verbose,
            origin = origin,
            datum = 'WGS84',
            format = 'asc')
2517
2518
2519##
2520# @brief
2521# @param basename_in
2522# @param basename_out
2523# @param quantity
2524# @param timestep
2525# @param reduction
# @param cellsize
# @param number_of_decimal_places
# @param verbose
# @param origin
# @param datum
# @note OBSOLETE - use sww2dem() instead.
def sww2ers(basename_in, basename_out=None,
            quantity=None,
            timestep=None,
            reduction=None,
            cellsize=10,
            number_of_decimal_places=None,
            verbose=False,
            origin=None,
            datum='WGS84'):
2539    print 'sww2ers will soon be obsoleted - please use sww2dem'
2540    sww2dem(basename_in,
2541            basename_out=basename_out,
2542            quantity=quantity,
2543            timestep=timestep,
2544            reduction=reduction,
2545            cellsize=cellsize,
2546            number_of_decimal_places=number_of_decimal_places,
2547            verbose=verbose,
2548            origin=origin,
2549            datum=datum,
2550            format='ers')
2551
2552################################################################################
2553# End of obsolete functions
2554################################################################################
2555
2556
2557##
2558# @brief Convert SWW file to PTS file (at selected points).
2559# @param basename_in Stem name of input SWW file.
2560# @param basename_out Stem name of output file.
# @param data_points Nx2 array of points where quantity is to be computed.
# @param quantity Name (or expression) of existing quantity(s) (def: elevation).
# @param timestep If given, output quantity at that timestep.
# @param reduction If given, function used to reduce quantity over timesteps.
# @param NODATA_value The NODATA value (default -9999).
# @param verbose True if this function is to be verbose.
# @param origin Tuple (zone, easting, northing) overriding the SWW georeference.
2568def sww2pts(basename_in, basename_out=None,
2569            data_points=None,
2570            quantity=None,
2571            timestep=None,
2572            reduction=None,
2573            NODATA_value=-9999,
2574            verbose=False,
2575            origin=None):
2576    """Read SWW file and convert to interpolated values at selected points
2577
2578    The parameter 'quantity' must be the name of an existing quantity or
2579    an expression involving existing quantities. The default is 'elevation'.
2580
2581    if timestep (an index) is given, output quantity at that timestep.
2582
2583    if reduction is given use that to reduce quantity over all timesteps.
2584
2585    data_points (Nx2 array) give locations of points where quantity is to
2586    be computed.
2587    """
2588
2589    import sys
2590    from anuga.utilities.polygon import inside_polygon, outside_polygon, \
2591             separate_points_by_polygon
2592    from anuga.abstract_2d_finite_volumes.util import \
2593             apply_expression_to_dictionary
2594    from anuga.geospatial_data.geospatial_data import Geospatial_data
2595
    if data_points is None:
        msg = 'data_points must be specified'
        raise Exception, msg

    if quantity is None:
        quantity = 'elevation'

    if reduction is None:
        reduction = max
2601
2602    if basename_out is None:
2603        basename_out = basename_in + '_%s' % quantity
2604
2605    swwfile = basename_in + '.sww'
2606    ptsfile = basename_out + '.pts'
2607
2608    # Read sww file
2609    if verbose: print 'Reading from %s' % swwfile
2610    from Scientific.IO.NetCDF import NetCDFFile
2611    fid = NetCDFFile(swwfile)
2612
2613    # Get extent and reference
2614    x = fid.variables['x'][:]
2615    y = fid.variables['y'][:]
2616    volumes = fid.variables['volumes'][:]
2617
2618    number_of_timesteps = fid.dimensions['number_of_timesteps']
2619    number_of_points = fid.dimensions['number_of_points']
2620    if origin is None:
2621        # Get geo_reference
2622        # sww files don't have to have a geo_ref
2623        try:
2624            geo_reference = Geo_reference(NetCDFObject=fid)
2625        except AttributeError, e:
2626            geo_reference = Geo_reference() # Default georef object
2627
2628        xllcorner = geo_reference.get_xllcorner()
2629        yllcorner = geo_reference.get_yllcorner()
2630        zone = geo_reference.get_zone()
2631    else:
2632        zone = origin[0]
2633        xllcorner = origin[1]
2634        yllcorner = origin[2]
2635
2636    # FIXME: Refactor using code from file_function.statistics
2637    # Something like print swwstats(swwname)
2638    if verbose:
2639        x = fid.variables['x'][:]
2640        y = fid.variables['y'][:]
2641        times = fid.variables['time'][:]
2642        print '------------------------------------------------'
2643        print 'Statistics of SWW file:'
2644        print '  Name: %s' % swwfile
2645        print '  Reference:'
2646        print '    Lower left corner: [%f, %f]' % (xllcorner, yllcorner)
2647        print '    Start time: %f' % fid.starttime[0]
2648        print '  Extent:'
2649        print '    x [m] in [%f, %f], len(x) == %d' \
2650              % (num.min(x), num.max(x), len(x.flat))
2651        print '    y [m] in [%f, %f], len(y) == %d' \
2652              % (num.min(y), num.max(y), len(y.flat))
2653        print '    t [s] in [%f, %f], len(t) == %d' \
2654              % (min(times), max(times), len(times))
2655        print '  Quantities [SI units]:'
2656        for name in ['stage', 'xmomentum', 'ymomentum', 'elevation']:
2657            q = fid.variables[name][:].flat
2658            print '    %s in [%f, %f]' % (name, min(q), max(q))
2659
2660    # Get quantity and reduce if applicable
2661    if verbose: print 'Processing quantity %s' % quantity
2662
2663    # Turn NetCDF objects into numeric arrays
2664    quantity_dict = {}
2665    for name in fid.variables.keys():
2666        quantity_dict[name] = fid.variables[name][:]
2667
2668    # Convert quantity expression to quantities found in sww file
2669    q = apply_expression_to_dictionary(quantity, quantity_dict)
2670
2671    if len(q.shape) == 2:
2672        # q has a time component and needs to be reduced along
2673        # the temporal dimension
2674        if verbose: print 'Reducing quantity %s' % quantity
2675
2676        q_reduced = num.zeros(number_of_points, num.float)
2677        for k in range(number_of_points):
2678            q_reduced[k] = reduction(q[:,k])
2679        q = q_reduced
2680
2681    # Post condition: Now q has dimension: number_of_points
2682    assert len(q.shape) == 1
2683    assert q.shape[0] == number_of_points
2684
2685    if verbose:
2686        print 'Processed values for %s are in [%f, %f]' \
2687              % (quantity, min(q), max(q))
2688
2689    # Create grid and update xll/yll corner and x,y
2690    vertex_points = num.concatenate((x[:, num.newaxis], y[:, num.newaxis]), axis=1)
2691    assert len(vertex_points.shape) == 2
2692
2693    # Interpolate
2694    from anuga.fit_interpolate.interpolate import Interpolate
2695    interp = Interpolate(vertex_points, volumes, verbose=verbose)
2696
2697    # Interpolate using quantity values
2698    if verbose: print 'Interpolating'
2699    interpolated_values = interp.interpolate(q, data_points).flatten()    #????#
2700
2701    if verbose:
2702        print ('Interpolated values are in [%f, %f]'
2703               % (num.min(interpolated_values), num.max(interpolated_values)))
2704
2705    # Assign NODATA_value to all points outside bounding polygon
2706    # (from interpolation mesh)
2707    P = interp.mesh.get_boundary_polygon()
2708    outside_indices = outside_polygon(data_points, P, closed=True)
2709
2710    for i in outside_indices:
2711        interpolated_values[i] = NODATA_value
2712
2713    # Store results
2714    G = Geospatial_data(data_points=data_points, attributes=interpolated_values)
2715
2716    G.export_points_file(ptsfile, absolute = True)
2717
2718    fid.close()
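
# Usage sketch for sww2pts (hypothetical filename and coordinates;
# 'gauges.sww' must exist and data_points are absolute UTM coordinates):
#
#     sww2pts('gauges',
#             data_points=[[306421.0, 6254351.0], [306422.5, 6254355.2]],
#             quantity='stage', verbose=True)    # writes gauges_stage.pts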
2719
2720
2721##
2722# @brief Convert ASC file to DEM file.
2723# @param basename_in Stem of input filename.
2724# @param basename_out Stem of output filename.
# @param use_cache True if the result is to be cached.
2726# @param verbose True if this function is to be verbose.
2727# @return
2728# @note A PRJ file with same stem basename must exist and is used to fix the
2729#       UTM zone, datum, false northings and eastings.
2730def convert_dem_from_ascii2netcdf(basename_in, basename_out=None,
2731                                  use_cache=False,
2732                                  verbose=False):
2733    """Read Digital Elevation model from the following ASCII format (.asc)
2734
2735    Example:
2736    ncols         3121
2737    nrows         1800
2738    xllcorner     722000
2739    yllcorner     5893000
2740    cellsize      25
2741    NODATA_value  -9999
2742    138.3698 137.4194 136.5062 135.5558 ..........
2743
2744    Convert basename_in + '.asc' to NetCDF format (.dem)
2745    mimicking the ASCII format closely.
2746
2747    An accompanying file with same basename_in but extension .prj must exist
2748    and is used to fix the UTM zone, datum, false northings and eastings.
2749
2750    The prj format is assumed to be as
2751
2752    Projection    UTM
2753    Zone          56
2754    Datum         WGS84
2755    Zunits        NO
2756    Units         METERS
2757    Spheroid      WGS84
2758    Xshift        0.0000000000
2759    Yshift        10000000.0000000000
2760    Parameters
2761    """
2762
2763    kwargs = {'basename_out': basename_out, 'verbose': verbose}
2764
2765    if use_cache is True:
2766        from caching import cache
2767        result = cache(_convert_dem_from_ascii2netcdf, basename_in, kwargs,
2768                       dependencies=[basename_in + '.asc',
2769                                     basename_in + '.prj'],
2770                       verbose=verbose)
2771
2772    else:
2773        result = apply(_convert_dem_from_ascii2netcdf, [basename_in], kwargs)
2774
2775    return result
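
# Usage sketch for convert_dem_from_ascii2netcdf (hypothetical filenames;
# both topo.asc and topo.prj must exist):
#
#     convert_dem_from_ascii2netcdf('topo', use_cache=True, verbose=True)
#     # writes topo.dem, suitable as input to dem2pts above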
2776
2777
2778##
2779# @brief Convert an ASC file to a DEM file.
2780# @param basename_in Stem of input filename.
2781# @param basename_out Stem of output filename.
2782# @param verbose True if this function is to be verbose.
2783def _convert_dem_from_ascii2netcdf(basename_in, basename_out = None,
2784                                  verbose = False):
2785    """Read Digital Elevation model from the following ASCII format (.asc)
2786
2787    Internal function. See public function convert_dem_from_ascii2netcdf
2788    for details.
2789    """
2790
2791    import os
2792    from Scientific.IO.NetCDF import NetCDFFile
2793
2794    root = basename_in
2795
2796    # Read Meta data
2797    if verbose: print 'Reading METADATA from %s' % root + '.prj'
2798
2799    metadatafile = open(root + '.prj')
2800    metalines = metadatafile.readlines()
2801    metadatafile.close()
2802
2803    L = metalines[0].strip().split()
2804    assert L[0].strip().lower() == 'projection'
2805    projection = L[1].strip()                   #TEXT
2806
2807    L = metalines[1].strip().split()
2808    assert L[0].strip().lower() == 'zone'
2809    zone = int(L[1].strip())
2810
2811    L = metalines[2].strip().split()
2812    assert L[0].strip().lower() == 'datum'
2813    datum = L[1].strip()                        #TEXT
2814
2815    L = metalines[3].strip().split()
2816    assert L[0].strip().lower() == 'zunits'     #IGNORE
2817    zunits = L[1].strip()                       #TEXT
2818
2819    L = metalines[4].strip().split()
2820    assert L[0].strip().lower() == 'units'
2821    units = L[1].strip()                        #TEXT
2822
2823    L = metalines[5].strip().split()
2824    assert L[0].strip().lower() == 'spheroid'   #IGNORE
2825    spheroid = L[1].strip()                     #TEXT
2826
2827    L = metalines[6].strip().split()
2828    assert L[0].strip().lower() == 'xshift'
2829    false_easting = float(L[1].strip())
2830
2831    L = metalines[7].strip().split()
2832    assert L[0].strip().lower() == 'yshift'
2833    false_northing = float(L[1].strip())
2834
2835    #Read DEM data
2836    datafile = open(basename_in + '.asc')
2837
2838    if verbose: print 'Reading DEM from %s' % basename_in + '.asc'
2839
2840    lines = datafile.readlines()
2841    datafile.close()
2842
    if verbose: print 'Got %d lines' % len(lines)
2844
2845    ncols = int(lines[0].split()[1].strip())
2846    nrows = int(lines[1].split()[1].strip())
2847
2848    # Do cellsize (line 4) before line 2 and 3
2849    cellsize = float(lines[4].split()[1].strip())
2850
2851    # Checks suggested by Joaquim Luis
2852    # Our internal representation of xllcorner
2853    # and yllcorner is non-standard.
2854    xref = lines[2].split()
2855    if xref[0].strip() == 'xllcorner':
2856        xllcorner = float(xref[1].strip()) # + 0.5*cellsize # Correct offset
2857    elif xref[0].strip() == 'xllcenter':
2858        xllcorner = float(xref[1].strip())
2859    else:
2860        msg = 'Unknown keyword: %s' % xref[0].strip()
2861        raise Exception, msg
2862
2863    yref = lines[3].split()
2864    if yref[0].strip() == 'yllcorner':
2865        yllcorner = float(yref[1].strip()) # + 0.5*cellsize # Correct offset
2866    elif yref[0].strip() == 'yllcenter':
2867        yllcorner = float(yref[1].strip())
2868    else:
2869        msg = 'Unknown keyword: %s' % yref[0].strip()
2870        raise Exception, msg
2871
2872    NODATA_value = int(lines[5].split()[1].strip())
2873
2874    assert len(lines) == nrows + 6
2875
    if basename_out is None:
2877        netcdfname = root + '.dem'
2878    else:
2879        netcdfname = basename_out + '.dem'
2880
2881    if verbose: print 'Store to NetCDF file %s' % netcdfname
2882
2883    # NetCDF file definition
2884    fid = NetCDFFile(netcdfname, netcdf_mode_w)
2885
2886    #Create new file
2887    fid.institution = 'Geoscience Australia'
2888    fid.description = 'NetCDF DEM format for compact and portable storage ' \
2889                      'of spatial point data'
2890
2891    fid.ncols = ncols
2892    fid.nrows = nrows
2893    fid.xllcorner = xllcorner
2894    fid.yllcorner = yllcorner
2895    fid.cellsize = cellsize
2896    fid.NODATA_value = NODATA_value
2897
2898    fid.zone = zone
2899    fid.false_easting = false_easting
2900    fid.false_northing = false_northing
2901    fid.projection = projection
2902    fid.datum = datum
2903    fid.units = units
2904
2905    # dimension definitions
2906    fid.createDimension('number_of_rows', nrows)
2907    fid.createDimension('number_of_columns', ncols)
2908
2909    # variable definitions
2910    fid.createVariable('elevation', netcdf_float, ('number_of_rows',
2911                                                   'number_of_columns'))
2912
2913    # Get handles to the variables
2914    elevation = fid.variables['elevation']
2915
2916    #Store data
2917    n = len(lines[6:])
2918    for i, line in enumerate(lines[6:]):
2919        fields = line.split()
2920        if verbose and i % ((n+10)/10) == 0:
2921            print 'Processing row %d of %d' % (i, nrows)
2922        elevation[i, :] = num.array([float(x) for x in fields])
2923
2924    fid.close()
2925
2926
2927##
2928# @brief Convert 'ferret' file to SWW file.
2929# @param basename_in Stem of input filename.
2930# @param basename_out Stem of output filename.
2931# @param verbose True if this function is to be verbose.
2932# @param minlat
2933# @param maxlat
2934# @param minlon
2935# @param maxlon
2936# @param mint
2937# @param maxt
2938# @param mean_stage
2939# @param origin
2940# @param zscale
2941# @param fail_on_NaN
2942# @param NaN_filler
2943# @param elevation
2944# @param inverted_bathymetry
2945def ferret2sww(basename_in, basename_out=None,
2946               verbose=False,
2947               minlat=None, maxlat=None,
2948               minlon=None, maxlon=None,
2949               mint=None, maxt=None, mean_stage=0,
2950               origin=None, zscale=1,
2951               fail_on_NaN=True,
2952               NaN_filler=0,
2953               elevation=None,
               inverted_bathymetry=True):
    # FIXME: Bathymetry should be obtained from MOST somehow,
    # alternatively from elsewhere or, as a last resort, specified here.
    # The value of -100 will work for the Wollongong tsunami scenario
    # but is very hacky.
2963    """Convert MOST and 'Ferret' NetCDF format for wave propagation to
2964    sww format native to abstract_2d_finite_volumes.
2965
2966    Specify only basename_in and read files of the form
2967    basefilename_ha.nc, basefilename_ua.nc, basefilename_va.nc containing
2968    relative height, x-velocity and y-velocity, respectively.
2969
2970    Also convert latitude and longitude to UTM. All coordinates are
2971    assumed to be given in the GDA94 datum.
2972
    min's and max's: If omitted - full extent is used.
    To include a value min may equal it, while max must exceed it.
    Lat and lon are assumed to be in decimal degrees.
2976
2977    origin is a 3-tuple with geo referenced
2978    UTM coordinates (zone, easting, northing)
2979
2980    nc format has values organised as HA[TIME, LATITUDE, LONGITUDE]
2981    which means that longitude is the fastest
2982    varying dimension (row major order, so to speak)
2983
2984    ferret2sww uses grid points as vertices in a triangular grid
2985    counting vertices from lower left corner upwards, then right
2986    """
2987
2988    import os
2989    from Scientific.IO.NetCDF import NetCDFFile
2990
2991    precision = num.float
2992
2993    msg = 'Must use latitudes and longitudes for minlat, maxlon etc'
2994
    if minlat is not None:
        assert -90 < minlat < 90, msg
    if maxlat is not None:
        assert -90 < maxlat < 90, msg
        if minlat is not None:
            assert maxlat > minlat
    if minlon is not None:
        assert -180 < minlon < 180, msg
    if maxlon is not None:
        assert -180 < maxlon < 180, msg
        if minlon is not None:
            assert maxlon > minlon
3007
3008    # Get NetCDF data
3009    if verbose: print 'Reading files %s_*.nc' % basename_in
3010
3011    file_h = NetCDFFile(basename_in + '_ha.nc', netcdf_mode_r) # Wave amplitude (cm)
3012    file_u = NetCDFFile(basename_in + '_ua.nc', netcdf_mode_r) # Velocity (x) (cm/s)
3013    file_v = NetCDFFile(basename_in + '_va.nc', netcdf_mode_r) # Velocity (y) (cm/s)
3014    file_e = NetCDFFile(basename_in + '_e.nc', netcdf_mode_r)  # Elevation (z) (m)
3015
3016    if basename_out is None:
3017        swwname = basename_in + '.sww'
3018    else:
3019        swwname = basename_out + '.sww'
3020
3021    # Get dimensions of file_h
3022    for dimension in file_h.dimensions.keys():
3023        if dimension[:3] == 'LON':
3024            dim_h_longitude = dimension
3025        if dimension[:3] == 'LAT':
3026            dim_h_latitude = dimension
3027        if dimension[:4] == 'TIME':
3028            dim_h_time = dimension
3029
3030    times = file_h.variables[dim_h_time]
3031    latitudes = file_h.variables[dim_h_latitude]
3032    longitudes = file_h.variables[dim_h_longitude]
3033
3038    # get dimensions for file_e
3039    for dimension in file_e.dimensions.keys():
3040        if dimension[:3] == 'LON':
3041            dim_e_longitude = dimension
3042        if dimension[:3] == 'LAT':
3043            dim_e_latitude = dimension
3044
3045    # get dimensions for file_u
3046    for dimension in file_u.dimensions.keys():
3047        if dimension[:3] == 'LON':
3048            dim_u_longitude = dimension
3049        if dimension[:3] == 'LAT':
3050            dim_u_latitude = dimension
3051        if dimension[:4] == 'TIME':
3052            dim_u_time = dimension
3053
3054    # get dimensions for file_v
3055    for dimension in file_v.dimensions.keys():
3056        if dimension[:3] == 'LON':
3057            dim_v_longitude = dimension
3058        if dimension[:3] == 'LAT':
3059            dim_v_latitude = dimension
3060        if dimension[:4] == 'TIME':
3061            dim_v_time = dimension
3062
    # Precision used by MOST for lat/lon is 4 or 5 decimals
3064    e_lat = num.around(file_e.variables[dim_e_latitude][:], 5)
3065    e_lon = num.around(file_e.variables[dim_e_longitude][:], 5)
3066
3067    # Check that files are compatible
3068    assert num.allclose(latitudes, file_u.variables[dim_u_latitude])
3069    assert num.allclose(latitudes, file_v.variables[dim_v_latitude])
3070    assert num.allclose(latitudes, e_lat)
3071
3072    assert num.allclose(longitudes, file_u.variables[dim_u_longitude])
3073    assert num.allclose(longitudes, file_v.variables[dim_v_longitude])
3074    assert num.allclose(longitudes, e_lon)
3075
3076    if mint is None:
3077        jmin = 0
3078        mint = times[0]
3079    else:
3080        jmin = num.searchsorted(times, mint)
3081
3082    if maxt is None:
3083        jmax = len(times)
3084        maxt = times[-1]
3085    else:
3086        jmax = num.searchsorted(times, maxt)
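    # num.searchsorted returns the first index at which the value could be
    # inserted while keeping 'times' sorted, so times[jmin:jmax] includes
    # mint (if present) but excludes maxt, consistent with the docstring.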
3087
3088    kmin, kmax, lmin, lmax = _get_min_max_indexes(latitudes[:],
3089                                                  longitudes[:],
3090                                                  minlat, maxlat,
3091                                                  minlon, maxlon)
3092
3093
3094    times = times[jmin:jmax]
3095    latitudes = latitudes[kmin:kmax]
3096    longitudes = longitudes[lmin:lmax]
3097
3098    if verbose: print 'cropping'
3099
3100    zname = 'ELEVATION'
3101
3102    amplitudes = file_h.variables['HA'][jmin:jmax, kmin:kmax, lmin:lmax]
3103    uspeed = file_u.variables['UA'][jmin:jmax, kmin:kmax, lmin:lmax] #Lon
3104    vspeed = file_v.variables['VA'][jmin:jmax, kmin:kmax, lmin:lmax] #Lat
3105    elevations = file_e.variables[zname][kmin:kmax, lmin:lmax]
3106
3121    #Get missing values
3122    nan_ha = file_h.variables['HA'].missing_value[0]
3123    nan_ua = file_u.variables['UA'].missing_value[0]
3124    nan_va = file_v.variables['VA'].missing_value[0]
3125    if hasattr(file_e.variables[zname],'missing_value'):
3126        nan_e  = file_e.variables[zname].missing_value[0]
3127    else:
3128        nan_e = None
3129
3130    #Cleanup
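    # In each block below, entries flagged as missing are replaced by
    # NaN_filler: multiplying by (missing==0) zeroes the flagged entries
    # and adding missing*NaN_filler fills them with the requested value.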
3131    missing = (amplitudes == nan_ha)
3132    if num.sometrue(missing):
3133        if fail_on_NaN:
3134            msg = 'NetCDFFile %s contains missing values' \
3135                  % (basename_in + '_ha.nc')
3136            raise DataMissingValuesError, msg
3137        else:
3138            amplitudes = amplitudes*(missing==0) + missing*NaN_filler
3139
3140    missing = (uspeed == nan_ua)
3141    if num.sometrue(missing):
3142        if fail_on_NaN:
3143            msg = 'NetCDFFile %s contains missing values' \
3144                  % (basename_in + '_ua.nc')
3145            raise DataMissingValuesError, msg
3146        else:
3147            uspeed = uspeed*(missing==0) + missing*NaN_filler
3148
3149    missing = (vspeed == nan_va)
3150    if num.sometrue(missing):
3151        if fail_on_NaN:
3152            msg = 'NetCDFFile %s contains missing values' \
3153                  % (basename_in + '_va.nc')
3154            raise DataMissingValuesError, msg
3155        else:
3156            vspeed = vspeed*(missing==0) + missing*NaN_filler
3157
3158    missing = (elevations == nan_e)
3159    if num.sometrue(missing):
3160        if fail_on_NaN:
3161            msg = 'NetCDFFile %s contains missing values' \
3162                  % (basename_in + '_e.nc')
3163            raise DataMissingValuesError, msg
3164        else:
3165            elevations = elevations*(missing==0) + missing*NaN_filler
3166
3167    number_of_times = times.shape[0]
3168    number_of_latitudes = latitudes.shape[0]
3169    number_of_longitudes = longitudes.shape[0]
3170
3171    assert amplitudes.shape[0] == number_of_times
3172    assert amplitudes.shape[1] == number_of_latitudes
3173    assert amplitudes.shape[2] == number_of_longitudes
3174
3175    if verbose:
3176        print '------------------------------------------------'
3177        print 'Statistics:'
3178        print '  Extent (lat/lon):'
3179        print '    lat in [%f, %f], len(lat) == %d' \
3180              % (num.min(latitudes), num.max(latitudes), len(latitudes.flat))
3181        print '    lon in [%f, %f], len(lon) == %d' \
3182              % (num.min(longitudes), num.max(longitudes),
3183                 len(longitudes.flat))
3184        print '    t in [%f, %f], len(t) == %d' \
3185              % (num.min(times), num.max(times), len(times.flat))
3186
3187        q = amplitudes.flatten()
3188        name = 'Amplitudes (ha) [cm]'
3189        print '    %s in [%f, %f]' % (name, min(q), max(q))
3190
3191        q = uspeed.flatten()
3192        name = 'Speeds (ua) [cm/s]'
3193        print '    %s in [%f, %f]' % (name, min(q), max(q))
3194
3195        q = vspeed.flatten()
3196        name = 'Speeds (va) [cm/s]'
3197        print '    %s in [%f, %f]' % (name, min(q), max(q))
3198
3199        q = elevations.flatten()
3200        name = 'Elevations (e) [m]'
3201        print '    %s in [%f, %f]' % (name, min(q), max(q))
3202
3204    number_of_points = number_of_latitudes * number_of_longitudes
3205    number_of_volumes = (number_of_latitudes-1) * (number_of_longitudes-1) * 2
3206
3207    file_h.close()
3208    file_u.close()
3209    file_v.close()
3210    file_e.close()
3211
3212    # NetCDF file definition
3213    outfile = NetCDFFile(swwname, netcdf_mode_w)
3214
3215    description = 'Converted from Ferret files: %s, %s, %s, %s' \
3216                  % (basename_in + '_ha.nc',
3217                     basename_in + '_ua.nc',
3218                     basename_in + '_va.nc',
3219                     basename_in + '_e.nc')
3220
3221    # Create new file
3222    starttime = times[0]
3223
3224    sww = Write_sww()
3225    sww.store_header(outfile, times, number_of_volumes,
3226                     number_of_points, description=description,
3227                     verbose=verbose, sww_precision=netcdf_float)
3228
3229    # Store
3230    from anuga.coordinate_transforms.redfearn import redfearn
3231    x = num.zeros(number_of_points, num.float)  #Easting
3232    y = num.zeros(number_of_points, num.float)  #Northing
3233
3234    if verbose: print 'Making triangular grid'
3235
3236    # Check zone boundaries
3237    refzone, _, _ = redfearn(latitudes[0], longitudes[0])
3238
3239    vertices = {}
3240    i = 0
3241    for k, lat in enumerate(latitudes):       # Y direction
3242        for l, lon in enumerate(longitudes):  # X direction
3243            vertices[l,k] = i
3244
3245            zone, easting, northing = redfearn(lat,lon)
3246
3247            #msg = 'Zone boundary crossed at longitude =', lon
3248            #assert zone == refzone, msg
3249            #print '%7.2f %7.2f %8.2f %8.2f' %(lon, lat, easting, northing)
3250            x[i] = easting
3251            y[i] = northing
3252            i += 1
3253
3254    #Construct 2 triangles per 'rectangular' element
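    # Each rectangular cell of the lat/lon grid is split along its rising
    # diagonal (v2-v3) into an upper and a lower triangle:
    #
    #     v1 ---- v3
    #     |      /|
    #     |     / |
    #     |    /  |
    #     |   /   |
    #     v2 ---- v4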
3255    volumes = []
3256    for l in range(number_of_longitudes-1):    # X direction
3257        for k in range(number_of_latitudes-1): # Y direction
3258            v1 = vertices[l,k+1]
3259            v2 = vertices[l,k]
3260            v3 = vertices[l+1,k+1]
3261            v4 = vertices[l+1,k]
3262
3263            volumes.append([v1,v2,v3]) #Upper element
3264            volumes.append([v4,v3,v2]) #Lower element
3265
3266    volumes = num.array(volumes, num.int)      #array default#
3267
3268    if origin is None:
3269        origin = Geo_reference(refzone, min(x), min(y))
3270    geo_ref = write_NetCDF_georeference(origin, outfile)
3271
3272    if elevation is not None:
3273        z = elevation
3274    else:
3275        if inverted_bathymetry:
3276            z = -1 * elevations
3277        else:
3278            z = elevations
3279    #FIXME: z should be obtained from MOST and passed in here
3280
3281    #FIXME use the Write_sww instance(sww) to write this info
3282    z = num.resize(z, outfile.variables['z'][:].shape)
3283    outfile.variables['x'][:] = x - geo_ref.get_xllcorner()
3284    outfile.variables['y'][:] = y - geo_ref.get_yllcorner()
3285    outfile.variables['z'][:] = z             #FIXME HACK for backwards compat.
3286    outfile.variables['elevation'][:] = z
3287    outfile.variables['volumes'][:] = volumes.astype(num.int32) #For Opteron 64
3288
3289    #Time stepping
3290    stage = outfile.variables['stage']
3291    xmomentum = outfile.variables['xmomentum']
3292    ymomentum = outfile.variables['ymomentum']
3293
3294    if verbose: print 'Converting quantities'
3295
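    # HA is in cm and UA/VA in cm/s, hence the divisions by 100 below to
    # convert to metres and m/s; momenta are the velocities times depth h.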
3296    n = len(times)
3297    for j in range(n):
3298        if verbose and j % ((n+10)/10) == 0: print '  Doing %d of %d' % (j, n)
3299
3300        i = 0
3301        for k in range(number_of_latitudes):      # Y direction
3302            for l in range(number_of_longitudes): # X direction
3303                w = zscale * amplitudes[j,k,l] / 100 + mean_stage
3304                stage[j,i] = w
3305                h = w - z[i]
3306                xmomentum[j,i] = uspeed[j,k,l]/100*h
3307                ymomentum[j,i] = vspeed[j,k,l]/100*h
3308                i += 1
3309
3310    #outfile.close()
3311
3312    #FIXME: Refactor using code from file_function.statistics
3313    #Something like print swwstats(swwname)
3314    if verbose:
3315        x = outfile.variables['x'][:]
3316        y = outfile.variables['y'][:]
3317        print '------------------------------------------------'
3318        print 'Statistics of output file:'
3319        print '  Name: %s' %swwname
3320        print '  Reference:'
3321        print '    Lower left corner: [%f, %f]' \
3322              % (geo_ref.get_xllcorner(), geo_ref.get_yllcorner())
3323        print '    Start time: %f' % starttime
3324        print '    Min time: %f' %mint
3325        print '    Max time: %f' %maxt
3326        print '  Extent:'
3327        print '    x [m] in [%f, %f], len(x) == %d' \
3328              % (num.min(x), num.max(x), len(x.flat))
3329        print '    y [m] in [%f, %f], len(y) == %d' \
3330              % (num.min(y), num.max(y), len(y.flat))
3331        print '    t [s] in [%f, %f], len(t) == %d' \
3332              % (min(times), max(times), len(times))
3333        print '  Quantities [SI units]:'
3334        for name in ['stage', 'xmomentum', 'ymomentum', 'elevation']:
3335            q = outfile.variables[name][:].flatten()
3336            print '    %s in [%f, %f]' % (name, min(q), max(q))
3337
3338    outfile.close()
3339
3340
3341##
3342# @brief Convert time-series text file to TMS file.
3343# @param filename Rootname of the input (.txt) and output (.tms) files.
3344# @param quantity_names Names to assign to the stored quantities.
3345# @param time_as_seconds True if the first field is time in seconds.
3346def timefile2netcdf(filename, quantity_names=None, time_as_seconds=False):
3347    """Template for converting typical text files with time series to
3348    NetCDF tms file.
3349
3350    The file format is assumed to be two fields separated by a comma, either:
3351
3352        time [DD/MM/YY hh:mm:ss], value0 value1 value2 ...
3353
3354    E.g.
3355
3356      31/08/04 00:00:00, 1.328223 0 0
3357      31/08/04 00:15:00, 1.292912 0 0
3358
3359    or time (seconds), value0 value1 value2 ...
3360
3361      0.0, 1.328223 0 0
3362      0.1, 1.292912 0 0
3363
3364    will provide a time dependent function f(t) with three attributes.
3365
3366    filename is assumed to be the rootname with extensions .txt and .tms
3367    """
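    # A minimal usage sketch (hypothetical file 'gauge.txt' in the comma
    # separated format above):
    #
    #     timefile2netcdf('gauge')                       # writes 'gauge.tms'
    #     timefile2netcdf('gauge', time_as_seconds=True) # first field in seconds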
3368
3369    import time, calendar
3370    from anuga.config import time_format
3371    from anuga.utilities.numerical_tools import ensure_numeric
3372
3373    file_text = filename + '.txt'
3374    fid = open(file_text)
3375    line = fid.readline()
3376    fid.close()
3377
3378    fields = line.split(',')
3379    msg = "File %s must have the format 'datetime, value0 value1 value2 ...'" \
3380          % file_text
3381    assert len(fields) == 2, msg
3382
3383    if not time_as_seconds:
3384        try:
3385            starttime = calendar.timegm(time.strptime(fields[0], time_format))
3386        except ValueError:
3387            msg = 'First field in file %s must be' % file_text
3388            msg += ' date-time with format %s.\n' % time_format
3389            msg += 'I got %s instead.' % fields[0]
3390            raise DataTimeError, msg
3391    else:
3392        try:
3393            starttime = float(fields[0])
3394        except ValueError:
3395            msg = "Bad time format"
3396            raise DataTimeError, msg
3397
3398    # Split values
3399    values = []
3400    for value in fields[1].split():
3401        values.append(float(value))
3402
3403    q = ensure_numeric(values)
3404
3405    msg = 'ERROR: File must contain at least one independent value'
3406    assert len(q.shape) == 1, msg
3407
3408    # Read times proper
3411
3412    fid = open(file_text)
3413    lines = fid.readlines()
3414    fid.close()
3415
3416    N = len(lines)
3417    d = len(q)
3418
3419    T = num.zeros(N, num.float)       # Time
3420    Q = num.zeros((N, d), num.float)  # Values
3421
3422    for i, line in enumerate(lines):
3423        fields = line.split(',')
3424        if not time_as_seconds:
3425            realtime = calendar.timegm(time.strptime(fields[0], time_format))
3426        else:
3427            realtime = float(fields[0])
3428        T[i] = realtime - starttime
3429
3430        for j, value in enumerate(fields[1].split()):
3431            Q[i, j] = float(value)
3432
3433    msg = 'File %s must list time as a monotonically ' % filename
3434    msg += 'increasing sequence'
3435    assert num.alltrue(T[1:] - T[:-1] > 0), msg
3436
3437    #Create NetCDF file
3438    from Scientific.IO.NetCDF import NetCDFFile
3439
3440    fid = NetCDFFile(filename + '.tms', netcdf_mode_w)
3441
3442    fid.institution = 'Geoscience Australia'
3443    fid.description = 'Time series'
3444
3445    #Reference point
3446    #Start time in seconds since the epoch (midnight 1/1/1970)
3447    #FIXME: Use Georef
3448    fid.starttime = starttime
3449
3450    # dimension definitions
3451    #fid.createDimension('number_of_volumes', self.number_of_volumes)
3452    #fid.createDimension('number_of_vertices', 3)
3453
3454    fid.createDimension('number_of_timesteps', len(T))
3455
3456    fid.createVariable('time', netcdf_float, ('number_of_timesteps',))
3457
3458    fid.variables['time'][:] = T
3459
3460    for i in range(Q.shape[1]):
3461        try:
3462            name = quantity_names[i]
3463        except (TypeError, IndexError):
3464            name = 'Attribute%d' % i
3465
3466        fid.createVariable(name, netcdf_float, ('number_of_timesteps',))
3467        fid.variables[name][:] = Q[:,i]
3468
3469    fid.close()
3470
3471
3472##
3473# @brief Get the extents of a NetCDF data file.
3474# @param file_name The path to the SWW file.
3475# @return A list of x, y and stage limits (min, max).
3476def extent_sww(file_name):
3477    """Read in an sww file.
3478
3479    Input:
3480    file_name - the sww file
3481
3482    Output:
3483    A list: [min(x),max(x),min(y),max(y),min(stage.flat),max(stage.flat)]
3484    """
3485
3486    from Scientific.IO.NetCDF import NetCDFFile
3487
3488    #Get NetCDF
3489    fid = NetCDFFile(file_name, netcdf_mode_r)
3490
3491    # Get the variables
3492    x = fid.variables['x'][:]
3493    y = fid.variables['y'][:]
3494    stage = fid.variables['stage'][:]
3495
3496    fid.close()
3497
3498    return [min(x), max(x), min(y), max(y), num.min(stage), num.max(stage)]
3499
3500
3501##
3502# @brief Create a Domain instance from an SWW file.
3503# @param filename Path of the SWW file to read.
3504# @param boundary Optional boundary dictionary to attach to the domain.
3505# @param t Time at which to extract quantities (default: last time in file).
3506# @param fail_if_NaN If True, raise an exception when missing values are found.
3507# @param NaN_filler Value used to replace missing values when fail_if_NaN is False.
3508# @param verbose True if this function is to be verbose.
3509# @param very_verbose True if this function is to be very verbose.
3510# @return A Domain instance.
3511def sww2domain(filename, boundary=None, t=None,
3512               fail_if_NaN=True, NaN_filler=0,
3513               verbose=False, very_verbose=False):
3514    """
3515    Usage: domain = sww2domain('file.sww',t=time (default = last time in file))
3516
3517    Boundary is not recommended if domain.smooth is not selected, as it
3518    uses unique coordinates, but not unique boundaries. This means that
3519    the boundary file will not be compatible with the coordinates, and will
3520    give a different final boundary, or crash.
3521    """
3522
3523    from Scientific.IO.NetCDF import NetCDFFile
3524    from shallow_water import Domain
3525
3526    # initialise NaN.
3527    NaN = 9.969209968386869e+036
3528
3529    if verbose: print 'Reading from ', filename
3530
3531    fid = NetCDFFile(filename, netcdf_mode_r)    # Open existing file for read
3532    time = fid.variables['time']       # Timesteps
3533    if t is None:
3534        t = time[-1]
3535    time_interp = get_time_interp(time,t)
3536
3537    # Get the variables as numeric arrays
3538    x = fid.variables['x'][:]                   # x-coordinates of vertices
3539    y = fid.variables['y'][:]                   # y-coordinates of vertices
3540    elevation = fid.variables['elevation']      # Elevation
3541    stage = fid.variables['stage']              # Water level
3542    xmomentum = fid.variables['xmomentum']      # Momentum in the x-direction
3543    ymomentum = fid.variables['ymomentum']      # Momentum in the y-direction
3544
3545    starttime = fid.starttime[0]
3546    volumes = fid.variables['volumes'][:]       # Connectivity
3547    coordinates = num.transpose(num.asarray([x.tolist(), y.tolist()]))
3548    # FIXME (Ole): Something like this might be better:
3549    #                 concatenate((x, y), axis=1)
3550    # or              concatenate((x[:,num.newaxis], y[:,num.newaxis]), axis=1)
3551
3552    conserved_quantities = []
3553    interpolated_quantities = {}
3554    other_quantities = []
3555
3556    # get geo_reference
3557    try:                             # sww files don't have to have a geo_ref
3558        geo_reference = Geo_reference(NetCDFObject=fid)
3559    except: # AttributeError, e:
3560        geo_reference = None
3561
3562    if verbose: print '    getting quantities'
3563
3564    for quantity in fid.variables.keys():
3565        dimensions = fid.variables[quantity].dimensions
3566        if 'number_of_timesteps' in dimensions:
3567            conserved_quantities.append(quantity)
3568            interpolated_quantities[quantity] = \
3569                  interpolated_quantity(fid.variables[quantity][:], time_interp)
3570        else:
3571            other_quantities.append(quantity)
3572
3573    other_quantities.remove('x')
3574    other_quantities.remove('y')
3575    other_quantities.remove('z')
3576    other_quantities.remove('volumes')
3577    try:
3578        other_quantities.remove('stage_range')
3579        other_quantities.remove('xmomentum_range')
3580        other_quantities.remove('ymomentum_range')
3581        other_quantities.remove('elevation_range')
3582    except ValueError:
3583        pass
3584
3585    conserved_quantities.remove('time')
3586
3587    if verbose: print '    building domain'
3588
3589    #    From domain.Domain:
3590    #    domain = Domain(coordinates, volumes,\
3591    #                    conserved_quantities = conserved_quantities,\
3592    #                    other_quantities = other_quantities,zone=zone,\
3593    #                    xllcorner=xllcorner, yllcorner=yllcorner)
3594
3595    # From shallow_water.Domain:
3596    coordinates = coordinates.tolist()
3597    volumes = volumes.tolist()
3598    # FIXME:should this be in mesh? (peter row)
3599    if fid.smoothing == 'Yes':
3600        unique = False
3601    else:
3602        unique = True
3603    if unique:
3604        coordinates, volumes, boundary = weed(coordinates, volumes, boundary)
3605
3606    try:
3607        domain = Domain(coordinates, volumes, boundary)
3608    except AssertionError, e:
3609        fid.close()
3610        msg = 'Domain could not be created: %s. ' \
3611              'Perhaps use "fail_if_NaN=False and NaN_filler = ..."' % e
3612        raise DataDomainError, msg
3613
3614    if not boundary is None:
3615        domain.boundary = boundary
3616
3617    domain.geo_reference = geo_reference
3618
3619    domain.starttime = float(starttime) + float(t)
3620    domain.time = 0.0
3621
3622    for quantity in other_quantities:
3623        try:
3624            NaN = fid.variables[quantity].missing_value
3625        except:
3626            pass                       # quantity has no missing_value number
3627        X = fid.variables[quantity][:]
3628        if very_verbose:
3629            print '       ', quantity
3630            print '        NaN =', NaN
3631            print '        max(X)'
3632            print '       ', max(X)
3633            print '        max(X)==NaN'
3634            print '       ', max(X)==NaN
3635            print ''
3636        if max(X) == NaN or min(X) == NaN:
3637            if fail_if_NaN:
3638                msg = 'quantity "%s" contains no_data entry' % quantity
3639                raise DataMissingValuesError, msg
3640            else:
3641                data = (X != NaN)
3642                X = (X*data) + (data==0)*NaN_filler
3643        if unique:
3644            X = num.resize(X, (len(X)/3, 3))
3645        domain.set_quantity(quantity, X)
3646    #
3647    for quantity in conserved_quantities:
3648        try:
3649            NaN = fid.variables[quantity].missing_value
3650        except:
3651            pass                       # quantity has no missing_value number
3652        X = interpolated_quantities[quantity]
3653        if very_verbose:
3654            print '       ',quantity
3655            print '        NaN =', NaN
3656            print '        max(X)'
3657            print '       ', max(X)
3658            print '        max(X)==NaN'
3659            print '       ', max(X)==NaN
3660            print ''
3661        if max(X) == NaN or min(X) == NaN:
3662            if fail_if_NaN:
3663                msg = 'quantity "%s" contains no_data entry' % quantity
3664                raise DataMissingValuesError, msg
3665            else:
3666                data = (X != NaN)
3667                X = (X*data) + (data==0)*NaN_filler
3668        if unique:
3669            X = num.resize(X, (X.shape[0]/3, 3))
3670        domain.set_quantity(quantity, X)
3671
3672    fid.close()
3673
3674    return domain
3675
3676
3677##
3678# @brief Interpolate a quantity wrt time.
3679# @param saved_quantity The quantity to interpolate.
3680# @param time_interp (index, ratio)
3681# @return A vector of interpolated values.
3682def interpolated_quantity(saved_quantity, time_interp):
3683    '''Given an index and ratio, interpolate quantity with respect to time.'''
3684
3685    index, ratio = time_interp
3686
3687    Q = saved_quantity
3688
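    # For example, time_interp == (1, 0.5) with Q == [0., 10., 20.] gives
    # q == 0.5*Q[1] + 0.5*Q[2] == 15.0, halfway between timesteps 1 and 2.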
3689    if ratio > 0:
3690        q = (1-ratio)*Q[index] + ratio*Q[index+1]
3691    else:
3692        q = Q[index]
3693
3694    #Return vector of interpolated values
3695    return q
3696
3697
3698##
3699# @brief Find the index and ratio for time interpolation.
3700# @param time Array of time values.
3701# @param t Time value to locate (defaults to the last time).
3702# @return An (index, ratio) tuple.
3703def get_time_interp(time, t=None):
3704    #Finds the ratio and index for time interpolation.
3705    #It is borrowed from previous abstract_2d_finite_volumes code.
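    # For example, get_time_interp([0., 10., 20.], 15.) returns (1, 0.5):
    # halfway between timesteps 1 and 2.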
3706    if t is None:
3707        t=time[-1]
3708        index = -1
3709        ratio = 0.
3710    else:
3711        T = time
3712        tau = t
3713        index=0
3714        msg = 'Time interval derived from file %s [%s:%s]' \
3715              % ('FIXMEfilename', T[0], T[-1])
3716        msg += ' does not match model time: %s' % tau
3717        if tau < time[0]: raise DataTimeError, msg
3718        if tau > time[-1]: raise DataTimeError, msg
3719        while tau > time[index]: index += 1
3720        while tau < time[index]: index -= 1
3721        if tau == time[index]:
3722            #Protect against case where tau == time[-1] (last time)
3723            # - also works in general when tau == time[i]
3724            ratio = 0
3725        else:
3726            #t is now between index and index+1
3727            ratio = (tau - time[index])/(time[index+1] - time[index])
3728
3729    return (index, ratio)
3730
3731
3732##
3733# @brief Remove duplicate points, re-indexing volumes and boundary.
3734# @param coordinates List or array of point coordinates.
3735# @param volumes List or array of triangles (as point indices).
3736# @param boundary Optional boundary dictionary.
3737def weed(coordinates, volumes, boundary=None):
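    # Collapse duplicated coordinates onto unique points and re-index the
    # volumes (and any boundary segments) accordingly.  A small sketch
    # with hypothetical data:
    #
    #     coordinates = [[0,0], [1,0], [0,0], [1,1]]  # point 2 repeats point 0
    #     volumes = [[0, 1, 3], [2, 3, 1]]
    #     coordinates, volumes, _ = weed(coordinates, volumes)
    #     # len(coordinates) is now 3 and both triangles reference the
    #     # same index wherever [0,0] appeared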
3738    if isinstance(coordinates, num.ndarray):
3739        coordinates = coordinates.tolist()
3740    if isinstance(volumes, num.ndarray):
3741        volumes = volumes.tolist()
3742
3743    unique = False
3744    point_dict = {}
3745    same_point = {}
3746    for i in range(len(coordinates)):
3747        point = tuple(coordinates[i])
3748        if point_dict.has_key(point):
3749            unique = True
3750            same_point[i] = point
3751            #to change all point i references to point j
3752        else:
3753            point_dict[point] = i
3754            same_point[i] = point
3755
3756    coordinates = []
3757    i = 0
3758    for point in point_dict.keys():
3759        point = tuple(point)
3760        coordinates.append(list(point))
3761        point_dict[point] = i
3762        i += 1
3763
3764    for volume in volumes:
3765        for i in range(len(volume)):
3766            index = volume[i]
3767            if index > -1:
3768                volume[i] = point_dict[same_point[index]]
3769
3770    new_boundary = {}
3771    if not boundary is None:
3772        for segment in boundary.keys():
3773            point0 = point_dict[same_point[segment[0]]]
3774            point1 = point_dict[same_point[segment[1]]]
3775            label = boundary[segment]
3776            #FIXME should the boundary attributes be concatenated
3777            #('exterior, pond') or replaced ('pond')(peter row)
3778
3779            if new_boundary.has_key((point0, point1)):
3780                pass    # Keep the label already stored for this segment
3781            elif new_boundary.has_key((point1, point0)):
3782                pass    # Keep the label already stored for this segment
3783            else:
3784                new_boundary[(point0, point1)] = label
3785
3786        boundary = new_boundary
3787
3788    return coordinates, volumes, boundary
3789
3790
3791##
3792# @brief Read DEM file, decimate data, write new DEM file.
3793# @param basename_in Stem of the input filename.
3794# @param stencil Array of weights applied to each block when decimating.
3795# @param cellsize_new New cell size to resample on.
3796# @param basename_out Stem of the output filename.
3797# @param verbose True if this function is to be verbose.
3798def decimate_dem(basename_in, stencil, cellsize_new, basename_out=None,
3799                 verbose=False):
3800    """Read Digitial Elevation model from the following NetCDF format (.dem)
3801
3802    Example:
3803
3804    ncols         3121
3805    nrows         1800
3806    xllcorner     722000
3807    yllcorner     5893000
3808    cellsize      25
3809    NODATA_value  -9999
3810    138.3698 137.4194 136.5062 135.5558 ..........
3811
3812    Decimate data to cellsize_new using stencil and write to NetCDF dem format.
3813    """
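    # A hypothetical example: resample a 25 m DEM onto a 100 m grid by
    # averaging 4x4 blocks (stencil weights summing to 1 give a mean):
    #
    #     stencil = num.ones((4, 4), num.float) / 16.0
    #     decimate_dem('topo', stencil, cellsize_new=100)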
3814
3815    import os
3816    from Scientific.IO.NetCDF import NetCDFFile
3817
3818    root = basename_in
3819    inname = root + '.dem'
3820
3821    #Open existing netcdf file to read
3822    infile = NetCDFFile(inname, netcdf_mode_r)
3823
3824    if verbose: print 'Reading DEM from %s' % inname
3825
3826    #Read metadata
3827    ncols = infile.ncols[0]
3828    nrows = infile.nrows[0]
3829    xllcorner = infile.xllcorner[0]
3830    yllcorner = infile.yllcorner[0]
3831    cellsize = infile.cellsize[0]
3832    NODATA_value = infile.NODATA_value[0]
3833    zone = infile.zone[0]
3834    false_easting = infile.false_easting[0]
3835    false_northing = infile.false_northing[0]
3836    projection = infile.projection
3837    datum = infile.datum
3838    units = infile.units
3839
3840    dem_elevation = infile.variables['elevation']
3841
3842    #Get output file name
3843    if basename_out is None:
3844        outname = root + '_' + repr(cellsize_new) + '.dem'
3845    else:
3846        outname = basename_out + '.dem'
3847
3848    if verbose: print 'Write decimated NetCDF file to %s' % outname
3849
3850    #Determine some dimensions for decimated grid
3851    (nrows_stencil, ncols_stencil) = stencil.shape
3852    x_offset = ncols_stencil / 2
3853    y_offset = nrows_stencil / 2
3854    cellsize_ratio = int(cellsize_new / cellsize)
3855    ncols_new = 1 + (ncols - ncols_stencil) / cellsize_ratio
3856    nrows_new = 1 + (nrows - nrows_stencil) / cellsize_ratio
3857
3858    #Open netcdf file for output
3859    outfile = NetCDFFile(outname, netcdf_mode_w)
3860
3861    #Create new file
3862    outfile.institution = 'Geoscience Australia'
3863    outfile.description = 'NetCDF DEM format for compact and portable ' \
3864                          'storage of spatial point data'
3865
3866    #Georeferencing
3867    outfile.zone = zone
3868    outfile.projection = projection
3869    outfile.datum = datum
3870    outfile.units = units
3871
3872    outfile.cellsize = cellsize_new
3873    outfile.NODATA_value = NODATA_value
3874    outfile.false_easting = false_easting
3875    outfile.false_northing = false_northing
3876
3877    outfile.xllcorner = xllcorner + (x_offset * cellsize)
3878    outfile.yllcorner = yllcorner + (y_offset * cellsize)
3879    outfile.ncols = ncols_new
3880    outfile.nrows = nrows_new
3881
3882    # dimension definition
3883    outfile.createDimension('number_of_points', nrows_new*ncols_new)
3884
3885    # variable definition
3886    outfile.createVariable('elevation', netcdf_float, ('number_of_points',))
3887
3888    # Get handle to the variable
3889    elevation = outfile.variables['elevation']
3890
3891    dem_elevation_r = num.reshape(dem_elevation, (nrows, ncols))
3892
3893    #Store data
3894    global_index = 0
3895    for i in range(nrows_new):
3896        if verbose: print 'Processing row %d of %d' % (i, nrows_new)
3897
3898        lower_index = global_index
3899        telev = num.zeros(ncols_new, num.float)
3900        local_index = 0
3901        trow = i * cellsize_ratio
3902
3903        for j in range(ncols_new):
3904            tcol = j * cellsize_ratio
3905            tmp = dem_elevation_r[trow:trow+nrows_stencil,
3906                                  tcol:tcol+ncols_stencil]
3907
3908            #if dem contains 1 or more NODATA_values set value in
3909            #decimated dem to NODATA_value, else compute decimated
3910            #value using stencil
3911            if num.sum(num.sum(num.equal(tmp, NODATA_value))) > 0:
3912                telev[local_index] = NODATA_value
3913            else:
3914                telev[local_index] = num.sum(num.sum(tmp * stencil))
3915
3916            global_index += 1
3917            local_index += 1
3918
3919        upper_index = global_index
3920
3921        elevation[lower_index:upper_index] = telev
3922
3923    assert global_index == nrows_new*ncols_new, \
3924           'index not equal to number of points'
3925
3926    infile.close()
3927    outfile.close()
3928
3929
3930##
3931# @brief Convert a tsh/msh file to sww format, as a sanity check.
3932# @param filename Path of the tsh/msh file to convert.
3933# @param verbose True if this function is to be verbose.
3934def tsh2sww(filename, verbose=False):
3935    """
3936    Convert a tsh/msh file to sww format, to check that it 'looks' good.
3937    """
3938
3939    if verbose: print 'Creating domain from', filename
3940
3941    domain = pmesh_to_domain_instance(filename, Domain)
3942
3943    if verbose == True:print "Number of triangles = ", len(domain)
3944
3945    domain.smooth = True
3946    domain.format = 'sww'   #Native netcdf visualisation format
3947    file_path, filename = path.split(filename)
3948    filename, ext = path.splitext(filename)
3949    domain.set_name(filename)
3950    domain.reduction = mean
3951
3952    if verbose == True:print "file_path",file_path
3953
3954    if file_path == "":
3955        file_path = "."
3956    domain.set_datadir(file_path)
3957
3958    if verbose:
3959        print "Output written to " + domain.get_datadir() + sep + \
3960              domain.get_name() + "." + domain.format
3961
3962    sww = get_dataobject(domain)
3963    sww.store_connectivity()
3964    sww.store_timestep()
3965
3966
3967##
3968# @brief Convert CSIRO ESRI file to an SWW boundary file.
3969# @param bath_dir Directory containing the bathymetry file.
3970# @param elevation_dir Directory containing the elevation files.
3971# @param ucur_dir Directory containing the u-velocity files.
3972# @param vcur_dir Directory containing the v-velocity files.
3973# @param sww_file Path of the SWW file to write.
3974# @param minlat Sets extent of area to be used.  If not supplied, full extent.
3975# @param maxlat Sets extent of area to be used.  If not supplied, full extent.
3976# @param minlon Sets extent of area to be used.  If not supplied, full extent.
3977# @param maxlon Sets extent of area to be used.  If not supplied, full extent.
3978# @param zscale Scale factor applied to the water elevation.
3979# @param mean_stage Constant value added to the stage.
3980# @param fail_on_NaN If True, raise an exception when missing values are found.
3981# @param elevation_NaN_filler Value used to replace missing elevation values.
3982# @param bath_prefix Prefix of the bathymetry file names.
3983# @param elevation_prefix Prefix of the elevation file names.
3984# @param verbose True if this function is to be verbose.
3985# @note Also convert latitude and longitude to UTM. All coordinates are
3986#       assumed to be given in the GDA94 datum.
3987def asc_csiro2sww(bath_dir,
3988                  elevation_dir,
3989                  ucur_dir,
3990                  vcur_dir,
3991                  sww_file,
3992                  minlat=None, maxlat=None,
3993                  minlon=None, maxlon=None,
3994                  zscale=1,
3995                  mean_stage=0,
3996                  fail_on_NaN=True,
3997                  elevation_NaN_filler=0,
3998                  bath_prefix='ba',
3999                  elevation_prefix='el',
4000                  verbose=False):
4001    """
4002    Produce an sww boundary file, from esri ascii data from CSIRO.
4003
4004    Also convert latitude and longitude to UTM. All coordinates are
4005    assumed to be given in the GDA94 datum.
4006
4007    assume:
4008    All files are in esri ascii format
4009
4010    4 types of information
4011    bathymetry
4012    elevation
4013    u velocity
4014    v velocity
4015
4016    Assumptions
4017    The metadata of all the files is the same
4018    Each type is in a separate directory
4019    One bath file with extension .000
4020    The time period is less than 24hrs and uniform.
4021    """
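    # A hypothetical call, assuming each directory holds matching esri
    # ascii grids as described above:
    #
    #     asc_csiro2sww('bathys', 'elevations', 'ucur', 'vcur', 'out.sww',
    #                   minlat=-38, maxlat=-35, minlon=148, maxlon=151,
    #                   verbose=True)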
4022
4023    from Scientific.IO.NetCDF import NetCDFFile
4024
4025    from anuga.coordinate_transforms.redfearn import redfearn
4026
4027    precision = netcdf_float # So if we want to change the precision it's done here
4028
4029    # go into the bath dir and load the only file
4030    bath_files = os.listdir(bath_dir)
4031    bath_file = bath_files[0]
4032    bath_dir_file = bath_dir + os.sep + bath_file
4033    bath_metadata, bath_grid = _read_asc(bath_dir_file)
4034
4035    #Use the date.time of the bath file as a basis for
4036    #the start time for other files
4037    base_start = bath_file[-12:]
4038
4039    #go into the elevation dir and load the 000 file
4040    elevation_dir_file = elevation_dir  + os.sep + elevation_prefix \
4041                         + base_start
4042
4043    elevation_files = os.listdir(elevation_dir)
4044    ucur_files = os.listdir(ucur_dir)
4045    vcur_files = os.listdir(vcur_dir)
4046    elevation_files.sort()
4047
4048    # the first elevation file should be the
4049    # file with the same base name as the bath data
4050    assert elevation_files[0] == 'el' + base_start
4051
4052    number_of_latitudes = bath_grid.shape[0]
4053    number_of_longitudes = bath_grid.shape[1]
4054    number_of_volumes = (number_of_latitudes-1) * (number_of_longitudes-1) * 2
4055
4056    longitudes = [bath_metadata['xllcorner'] + x*bath_metadata['cellsize'] \
4057                  for x in range(number_of_longitudes)]
4058    latitudes = [bath_metadata['yllcorner'] + y*bath_metadata['cellsize'] \
4059                 for y in range(number_of_latitudes)]
4060
4061    # reverse order of lat, so the first lat represents the first grid row
4062    latitudes.reverse()
4063
4064    kmin, kmax, lmin, lmax = _get_min_max_indexes(latitudes[:],longitudes[:],
4065                                                  minlat=minlat, maxlat=maxlat,
4066                                                  minlon=minlon, maxlon=maxlon)
4067
4068    bath_grid = bath_grid[kmin:kmax,lmin:lmax]
4069    latitudes = latitudes[kmin:kmax]
4070    longitudes = longitudes[lmin:lmax]
4071    number_of_latitudes = len(latitudes)
4072    number_of_longitudes = len(longitudes)
4073    number_of_times = len(os.listdir(elevation_dir))
4074    number_of_points = number_of_latitudes * number_of_longitudes
4075    number_of_volumes = (number_of_latitudes-1) * (number_of_longitudes-1) * 2
4076
4077    #Work out the times
4078    if len(elevation_files) > 1:
4079        # Assume: The time period is less than 24hrs.
4080        time_period = (int(elevation_files[1][-3:]) \
4081                       - int(elevation_files[0][-3:])) * 60*60
4082        times = [x*time_period for x in range(len(elevation_files))]
4083    else:
4084        times = [0.0]
4085
4086    if verbose:
4087        print '------------------------------------------------'
4088        print 'Statistics:'
4089        print '  Extent (lat/lon):'
4090        print '    lat in [%f, %f], len(lat) == %d' \
4091              % (min(latitudes), max(latitudes), len(latitudes))
4092        print '    lon in [%f, %f], len(lon) == %d' \
4093              % (min(longitudes), max(longitudes), len(longitudes))
4094        print '    t in [%f, %f], len(t) == %d' \
4095              % (min(times), max(times), len(times))
4096
4097    ######### WRITE THE SWW FILE #############
4098
4099    # NetCDF file definition
4100    outfile = NetCDFFile(sww_file, netcdf_mode_w)
4101
4102    #Create new file
4103    outfile.institution = 'Geoscience Australia'
4104    outfile.description = 'Converted from XXX'
4105
4106    #For sww compatibility
4107    outfile.smoothing = 'Yes'
4108    outfile.order = 1
4109
4110    #Start time in seconds since the epoch (midnight 1/1/1970)
4111    outfile.starttime = starttime = times[0]
4112
4113    # dimension definitions
4114    outfile.createDimension('number_of_volumes', number_of_volumes)
4115    outfile.createDimension('number_of_vertices', 3)
4116    outfile.createDimension('number_of_points', number_of_points)
4117    outfile.createDimension('number_of_timesteps', number_of_times)
4118
4119    # variable definitions
4120    outfile.createVariable('x', precision, ('number_of_points',))
4121    outfile.createVariable('y', precision, ('number_of_points',))
4122    outfile.createVariable('elevation', precision, ('number_of_points',))
4123
4124    #FIXME: Backwards compatibility
4125    outfile.createVariable('z', precision, ('number_of_points',))
4126    #################################
4127
4128    outfile.createVariable('volumes', netcdf_int, ('number_of_volumes',
4129                                                   'number_of_vertices'))
4130
4131    outfile.createVariable('time', precision, ('number_of_timesteps',))
4132
4133    outfile.createVariable('stage', precision, ('number_of_timesteps',
4134                                                'number_of_points'))
4135
4136    outfile.createVariable('xmomentum', precision, ('number_of_timesteps',
4137                                                    'number_of_points'))
4138
4139    outfile.createVariable('ymomentum', precision, ('number_of_timesteps',
4140                                                    'number_of_points'))
4141
4142    #Store
4143    from anuga.coordinate_transforms.redfearn import redfearn
4144
4145    x = num.zeros(number_of_points, num.float)  #Easting
4146    y = num.zeros(number_of_points, num.float)  #Northing
4147
4148    if verbose: print 'Making triangular grid'
4149
4150    #Get zone of 1st point.
4151    refzone, _, _ = redfearn(latitudes[0], longitudes[0])
4152
4153    vertices = {}
4154    i = 0
4155    for k, lat in enumerate(latitudes):
4156        for l, lon in enumerate(longitudes):
4157            vertices[l,k] = i
4158
4159            zone, easting, northing = redfearn(lat, lon)
4160
4161            #msg = 'Zone boundary crossed at longitude =', lon
4162            #assert zone == refzone, msg
4163            #print '%7.2f %7.2f %8.2f %8.2f' %(lon, lat, easting, northing)
4164            x[i] = easting
4165            y[i] = northing
4166            i += 1
4167
4168    #Construct 2 triangles per 'rectangular' element
4169    volumes = []
4170    for l in range(number_of_longitudes-1):    #X direction
4171        for k in range(number_of_latitudes-1): #Y direction
4172            v1 = vertices[l,k+1]
4173            v2 = vertices[l,k]
4174            v3 = vertices[l+1,k+1]
4175            v4 = vertices[l+1,k]
4176
4177            #Note, this is different to the ferret2sww code
4178            #since the order of the lats is reversed.
4179            volumes.append([v1,v3,v2]) #Upper element
4180            volumes.append([v4,v2,v3]) #Lower element
4181
4182    volumes = num.array(volumes, num.int)      #array default#
4183
4184    geo_ref = Geo_reference(refzone, min(x), min(y))
4185    geo_ref.write_NetCDF(outfile)
4186
4187    # This will put the geo ref in the middle
4188    #geo_ref = Geo_reference(refzone, (max(x)+min(x))/2., (max(y)+min(y))/2.)
4189
4190    if verbose:
4191        print '------------------------------------------------'
4192        print 'More Statistics:'
4193        print '  Extent (x/y) [m]:'
4194        print '    x in [%f, %f], len(x) == %d' \
4195              % (min(x), max(x), len(x))
4196        print '    y in [%f, %f], len(y) == %d' \
4197              % (min(y), max(y), len(y))
4198        print 'geo_ref: ', geo_ref
4199
4200    z = num.resize(bath_grid,outfile.variables['z'][:].shape)
4201    outfile.variables['x'][:] = x - geo_ref.get_xllcorner()
4202    outfile.variables['y'][:] = y - geo_ref.get_yllcorner()
4203# FIXME (Ole): Remove once viewer has been recompiled and changed
4204#              to use elevation instead of z
4205    outfile.variables['z'][:] = z
4206    outfile.variables['elevation'][:] = z
4207    outfile.variables['volumes'][:] = volumes.astype(num.int32) # On Opteron 64
4208
4209    stage = outfile.variables['stage']
4210    xmomentum = outfile.variables['xmomentum']
4211    ymomentum = outfile.variables['ymomentum']
4212
4213    outfile.variables['time'][:] = times   #Store time relative
4214
4215    if verbose: print 'Converting quantities'
4216
4217    n = number_of_times
4218    for j in range(number_of_times):
4219        # load in files
4220        elevation_meta, elevation_grid = \
4221            _read_asc(elevation_dir + os.sep + elevation_files[j])
4222
4223        _, u_momentum_grid = _read_asc(ucur_dir + os.sep + ucur_files[j])
4224        _, v_momentum_grid = _read_asc(vcur_dir + os.sep + vcur_files[j])
4225
4226        #cut matrix to desired size
4227        elevation_grid = elevation_grid[kmin:kmax,lmin:lmax]
4228        u_momentum_grid = u_momentum_grid[kmin:kmax,lmin:lmax]
4229        v_momentum_grid = v_momentum_grid[kmin:kmax,lmin:lmax]
4230
4231        # handle missing values
4232        missing = (elevation_grid == elevation_meta['NODATA_value'])
4233        if num.sometrue(missing):
4234            if fail_on_NaN:
4235                msg = 'File %s contains missing values' \
4236                      % (elevation_files[j])
4237                raise DataMissingValuesError, msg
4238            else:
4239                elevation_grid = elevation_grid*(missing==0) \
4240                                 + missing*elevation_NaN_filler
4241
4242        if verbose and j % ((n+10)/10) == 0: print '  Doing %d of %d' % (j, n)
4243
4244        i = 0
4245        for k in range(number_of_latitudes):      #Y direction
4246            for l in range(number_of_longitudes): #X direction
4247                w = zscale*elevation_grid[k,l] + mean_stage
4248                stage[j,i] = w
4249                h = w - z[i]
4250                xmomentum[j,i] = u_momentum_grid[k,l]*h
4251                ymomentum[j,i] = v_momentum_grid[k,l]*h
4252                i += 1
4253
4254    outfile.close()
4255
4256
4257##
4258# @brief Return max&min indexes (for slicing) of area specified.
4259# @param latitudes_ref Array (or NetCDF variable) of latitudes.
4260# @param longitudes_ref Array (or NetCDF variable) of longitudes.
4261# @param minlat Minimum latitude of specified area.
4262# @param maxlat Maximum latitude of specified area.
4263# @param minlon Minimum longitude of specified area.
4264# @param maxlon Maximum longitude of specified area.
4265# @return Tuple (lat_min_index, lat_max_index, lon_min_index, lon_max_index)
4266def _get_min_max_indexes(latitudes_ref,longitudes_ref,
4267                         minlat=None, maxlat=None,
4268                         minlon=None, maxlon=None):
4269    """
4270    Return max, min indexes (for slicing) of the lat and long arrays to cover
4271    the area specified with min/max lat/long.
4272
4273    Think of the latitudes and longitudes describing a 2d surface.
4274    The area returned is, if possible, just big enough to cover the
4275    input max/min area. (This will not be possible if the max/min area
4276    has a section outside of the latitudes/longitudes area.)
4277
4278    Assumes longitudes are sorted from low to high (west to east,
4279    e.g. 148 to 151), and latitudes are sorted in either ascending or
4280    descending order.
4281    """
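    # A small worked example (ascending latitudes, hypothetical values):
    #
    #     lats = [-35., -34., -33., -32.]
    #     lons = [150., 151., 152.]
    #     _get_min_max_indexes(lats, lons, minlat=-33.5, maxlat=-32.5,
    #                          minlon=150.5, maxlon=151.5)
    #     # returns (1, 4, 0, 3): lats[1:4] and lons[0:3] just cover
    #     # the requested area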
4282
4283    latitudes = latitudes_ref[:]
4284    longitudes = longitudes_ref[:]
4285
4286    latitudes = ensure_numeric(latitudes)
4287    longitudes = ensure_numeric(longitudes)
4288
4289    assert num.allclose(num.sort(longitudes), longitudes)
4290
4294
4295    lat_ascending = True
4296    if not num.allclose(num.sort(latitudes), latitudes):
4297        lat_ascending = False
4298        # reverse order of lat, so it's in ascending order
4299        latitudes = latitudes[::-1]
4300        assert num.allclose(num.sort(latitudes), latitudes)
4301
4302    largest_lat_index = len(latitudes)-1
4303
4304    #Cut out a smaller extent.
4305    if minlat is None:
4306        lat_min_index = 0
4307    else:
4308        lat_min_index = num.searchsorted(latitudes, minlat) - 1
4309        if lat_min_index < 0:
4310            lat_min_index = 0
4311
4312    if maxlat is None:
4313        lat_max_index = largest_lat_index #len(latitudes)
4314    else:
4315        lat_max_index = num.searchsorted(latitudes, maxlat)
4316        if lat_max_index > largest_lat_index:
4317            lat_max_index = largest_lat_index
4318
4319    if minlon is None:
4320        lon_min_index = 0
4321    else:
4322        lon_min_index = num.searchsorted(longitudes, minlon) - 1
4323        if lon_min_index < 0:
4324            lon_min_index = 0
4325
4326    if maxlon is None:
4327        lon_max_index = len(longitudes)
4328    else:
4329        lon_max_index = num.searchsorted(longitudes, maxlon)
4330
4331    # Reversing the indexes, if the lat array is descending
4332    if lat_ascending is False:
4333        lat_min_index, lat_max_index = largest_lat_index - lat_max_index, \
4334                                       largest_lat_index - lat_min_index
4335    lat_max_index = lat_max_index + 1 # taking into account how slicing works
4336    lon_max_index = lon_max_index + 1 # taking into account how slicing works
4337
4338    return lat_min_index, lat_max_index, lon_min_index, lon_max_index
4339
4340
4341##
4342# @brief Read an ASC file.
4343# @param filename Path to the file to read.
4344# @param verbose True if this function is to be verbose.
4345def _read_asc(filename, verbose=False):
4346    """Read esri file from the following ASCII format (.asc)
4347
4348    Example:
4349
4350    ncols         3121
4351    nrows         1800
4352    xllcorner     722000
4353    yllcorner     5893000
4354    cellsize      25
4355    NODATA_value  -9999
4356    138.3698 137.4194 136.5062 135.5558 ..........
4357    """
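    # Returns a (metadata, grid) tuple: metadata is a dict holding
    # 'xllcorner', 'yllcorner', 'cellsize' and 'NODATA_value', and grid
    # is an nrows x ncols numpy array of the cell values.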
4358
4359    datafile = open(filename)
4360
4361    if verbose: print 'Reading DEM from %s' % filename
4362
4363    lines = datafile.readlines()
4364    datafile.close()
4365
4366    if verbose: print 'Got %d lines' % len(lines)
4367
4368    ncols = int(lines.pop(0).split()[1].strip())
4369    nrows = int(lines.pop(0).split()[1].strip())
4370    xllcorner = float(lines.pop(0).split()[1].strip())
4371    yllcorner = float(lines.pop(0).split()[1].strip())
4372    cellsize = float(lines.pop(0).split()[1].strip())
4373    NODATA_value = float(lines.pop(0).split()[1].strip())
4374
4375    assert len(lines) == nrows
4376
4377    #Store data
4378    grid = []
4379
4380    n = len(lines)
4381    for i, line in enumerate(lines):
4382        cells = line.split()
4383        assert len(cells) == ncols
4384        grid.append(num.array([float(x) for x in cells]))
4385    grid = num.array(grid)
4386
4387    return {'xllcorner':xllcorner,
4388            'yllcorner':yllcorner,
4389            'cellsize':cellsize,
4390            'NODATA_value':NODATA_value}, grid
4391
4392
4393####  URS 2 SWW  ####
4394
4395# Definitions of various NetCDF dimension names, etc.
4396lon_name = 'LON'
4397lat_name = 'LAT'
4398time_name = 'TIME'
4399precision = netcdf_float # So if we want to change the precision it's done here
4400
4401##
4402# @brief Class for a NetCDF data file writer.
4403class Write_nc:
4404    """Write an nc file.
4405
4406    Note, this should be checked to meet cdc netcdf conventions for gridded
4407    data. http://www.cdc.noaa.gov/cdc/conventions/cdc_netcdf_standard.shtml
4408    """
4409
4410    ##
4411    # @brief Instantiate a Write_nc instance.
4412    # @param quantity_name Name of the quantity: 'HA', 'UA' or 'VA'.
4413    # @param file_name Path of the NC file to write.
4414    # @param time_step_count The number of time steps.
4415    # @param time_step The time_step size.
4416    # @param lon Array of longitudes.
4417    # @param lat Array of latitudes.
4418    def __init__(self,
4419                 quantity_name,
4420                 file_name,
4421                 time_step_count,
4422                 time_step,
4423                 lon,
4424                 lat):
4425        """Instantiate a Write_nc instance (NetCDF file writer).
4426
4427        time_step_count is the number of time steps.
4428        time_step is the time step size
4429
4430        pre-condition: quantity_name must be 'HA', 'UA' or 'VA'.
4431        """
4432
4433        self.quantity_name = quantity_name
4434        quantity_units = {'HA':'CENTIMETERS',
4435                          'UA':'CENTIMETERS/SECOND',
4436                          'VA':'CENTIMETERS/SECOND'}
4437
4438        multiplier_dic = {'HA':100.0,   # To convert from m to cm
4439                          'UA':100.0,   #             and m/s to cm/sec
4440                          'VA':-100.0}  # MUX files have positive x in the
4441                                        # Southern direction.  This corrects
4442                                        # for it, when writing nc files.
4443
4444        self.quantity_multiplier = multiplier_dic[self.quantity_name]
4445
4446        #self.file_name = file_name
4447        self.time_step_count = time_step_count
4448        self.time_step = time_step
4449
4450        # NetCDF file definition
4451        self.outfile = NetCDFFile(file_name, netcdf_mode_w)
4452        outfile = self.outfile
4453
4454        #Create new file
4455        nc_lon_lat_header(outfile, lon, lat)
4456
4457        # TIME
4458        outfile.createDimension(time_name, None)
4459        outfile.createVariable(time_name, precision, (time_name,))
4460
4461        #QUANTITY
4462        outfile.createVariable(self.quantity_name, precision,
4463                               (time_name, lat_name, lon_name))
4464        outfile.variables[self.quantity_name].missing_value = -1.e+034
4465        outfile.variables[self.quantity_name].units = \
4466                                 quantity_units[self.quantity_name]
4467        outfile.variables[lon_name][:] = ensure_numeric(lon)
4468        outfile.variables[lat_name][:] = ensure_numeric(lat)
4469
4470        #Assume no one will be wanting to read this, while we are writing
4471        #outfile.close()
4472
4473    ##
4474    # @brief Write a time-step of quantity data.
4475    # @param quantity_slice The data to be stored for this time-step.
4476    def store_timestep(self, quantity_slice):
4477        """Write a time slice of quantity info
4478
4479        quantity_slice is the data to be stored at this time step
4480        """
4481
4482        # Get the variables
4483        time = self.outfile.variables[time_name]
4484        quantity = self.outfile.variables[self.quantity_name]
4485
4486        # get index of the slice to write
4487        i = len(time)
4488
4489        #Store time
4490        time[i] = i * self.time_step    #self.domain.time
4491        quantity[i,:] = quantity_slice * self.quantity_multiplier
4492
4493    ##
4494    # @brief Close file underlying the class instance.
4495    def close(self):
4496        self.outfile.close()
4497
4498
4499##
4500# @brief Convert URS file to SWW file.
4501# @param basename_in Stem of the input filename.
4502# @param basename_out Stem of the output filename.
4503# @param verbose True if this function is to be verbose.
4504# @param remove_nc_files True to remove the intermediate NC files after conversion.
4505# @param minlat Sets extent of area to be used.  If not supplied, full extent.
4506# @param maxlat Sets extent of area to be used.  If not supplied, full extent.
4507# @param minlon Sets extent of area to be used.  If not supplied, full extent.
4508# @param maxlon Sets extent of area to be used.  If not supplied, full extent.
4509# @param mint Minimum time of extent to use.  If not supplied, full extent.
4510# @param maxt Maximum time of extent to use.  If not supplied, full extent.
4511# @param mean_stage Constant value added to the stage.
4512# @param origin A 3-tuple with geo referenced UTM coordinates
4513# @param zscale Scale factor applied to the wave amplitude.
4514# @param fail_on_NaN If True, raise an exception when missing values are found.
4515# @param NaN_filler Value used to replace missing values when fail_on_NaN is False.
4516# @param elevation If supplied, used in place of the elevation in the input files.
4517# @note Also convert latitude and longitude to UTM. All coordinates are
4518#       assumed to be given in the GDA94 datum.
4519def urs2sww(basename_in='o', basename_out=None, verbose=False,
4520            remove_nc_files=True,
4521            minlat=None, maxlat=None,
4522            minlon=None, maxlon=None,
4523            mint=None, maxt=None,
4524            mean_stage=0,
4525            origin=None,
4526            zscale=1,
4527            fail_on_NaN=True,
4528            NaN_filler=0,
4529            elevation=None):
4530    """Convert a URS file to an SWW file.
4531    Convert URS C binary format for wave propagation to
4532    sww format native to abstract_2d_finite_volumes.
4533
4534    Specify only basename_in and read files of the form
4535    basefilename-z-mux2, basefilename-e-mux2 and
4536    basefilename-n-mux2 containing relative height,
4537    x-velocity and y-velocity, respectively.
4538
4539    Also convert latitude and longitude to UTM. All coordinates are
4540    assumed to be given in the GDA94 datum. The latitude and longitude
4541    information is for a grid.
4542
4543    min's and max's: If omitted - the full extent is used.
4544    To include a value min may equal it, while max must exceed it.
4545    Lat and lon are assumed to be in decimal degrees.
4546    NOTE: minlon is the most eastern boundary.
4547
4548    origin is a 3-tuple with geo referenced
4549    UTM coordinates (zone, easting, northing)
4550    It will be the origin of the sww file. This shouldn't be used,
4551    since all of anuga should be able to handle an arbitrary origin.
4552
4553    URS C binary format has data organised as TIME, LONGITUDE, LATITUDE
4554    which means that latitude is the fastest
4555    varying dimension (row major order, so to speak)
4556
4557    In URS C binary the latitudes and longitudes are in ascending order.
4558    """
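    # A minimal usage sketch (hypothetical basenames): convert the mux
    # files with stem 'o' into 'model.sww', keeping the full extent:
    #
    #     urs2sww(basename_in='o', basename_out='model', verbose=True)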
4559
4560    if basename_out is None:
4561        basename_out = basename_in
4562
4563    files_out = urs2nc(basename_in, basename_out)
4564
4565    ferret2sww(basename_out,
4566               minlat=minlat,
4567               maxlat=maxlat,
4568               minlon=minlon,
4569               maxlon=maxlon,
4570               mint=mint,
4571               maxt=maxt,
4572               mean_stage=mean_stage,
4573               origin=origin,
4574               zscale=zscale,
4575               fail_on_NaN=fail_on_NaN,
4576               NaN_filler=NaN_filler,
4577               inverted_bathymetry=True,
4578               verbose=verbose)
4579   
4580    if remove_nc_files:
4581        for file_out in files_out:
4582            os.remove(file_out)
4583
4584
4585##
4586# @brief Convert 3 URS files back to 4 NC files.
4587# @param basename_in Stem of the input filenames.
4588# @param basename_out Stem of the output filenames.
4589# @note The name of the urs file names must be:
4590#          [basename_in]-z-mux
4591#          [basename_in]-e-mux
4592#          [basename_in]-n-mux
4593def urs2nc(basename_in='o', basename_out='urs'):
4594    """Convert the 3 urs files to 4 nc files.
4595
4596    The urs file names must be:
4597    [basename_in]-z-mux
4598    [basename_in]-e-mux
4599    [basename_in]-n-mux
4600    """
4601
4602    files_in = [basename_in + WAVEHEIGHT_MUX_LABEL,
4603                basename_in + EAST_VELOCITY_LABEL,
4604                basename_in + NORTH_VELOCITY_LABEL]
4605    files_out = [basename_out + '_ha.nc',
4606                 basename_out + '_ua.nc',
4607                 basename_out + '_va.nc']
4608    quantities = ['HA', 'UA', 'VA']
4609
4611    for i, file_name in enumerate(files_in):
4612        if os.access(file_name, os.F_OK) == 0:
4613            if os.access(file_name + '.mux', os.F_OK) == 0:
4614                msg = 'File %s does not exist or is not accessible' % file_name
4615                raise IOError, msg
4616            else:
4617                files_in[i] += '.mux'
4618                print "file_name", file_name
4619
4620    hashed_elevation = None
4621    for file_in, file_out, quantity in zip(files_in,
4622                                           files_out,
4623                                           quantities):
4624        lonlatdep, lon, lat, depth = _binary_c2nc(file_in,
4625                                                  file_out,
4626                                                  quantity)
4627        if hashed_elevation is None:
4628            elevation_file = basename_out + '_e.nc'
4629            write_elevation_nc(elevation_file,
4630                               lon,
4631                               lat,
4632                               depth)
4633            hashed_elevation = myhash(lonlatdep)
4634        else:
4635            msg = "The elevation information in the mux files is inconsistent"
4636            assert hashed_elevation == myhash(lonlatdep), msg
4637
4638    files_out.append(elevation_file)
4639
4640    return files_out
4641
4642
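# Example of what urs2nc produces (a sketch; the input files 'o-z-mux',
# 'o-e-mux' and 'o-n-mux' are assumed to exist, possibly with an extra
# '.mux' extension, which is handled above):
#
#     files = urs2nc(basename_in='o', basename_out='urs')
#     # files == ['urs_ha.nc', 'urs_ua.nc', 'urs_va.nc', 'urs_e.nc']
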
4643##
4644# @brief Convert a quantity URS file to a NetCDF file.
4645# @param file_in Path to input URS file.
4646# @param file_out Path to the output file.
4647# @param quantity Name of the quantity to be written to the output file.
4648# @return A tuple (lonlatdep, lon, lat, depth).
4649def _binary_c2nc(file_in, file_out, quantity):
4650    """Reads in a quantity urs file and writes a quantity nc file.
4651    Additionally, returns the depth and lat, long info,
4652    so it can be written to a file.
4653    """
4654
    columns = 3                           # long, lat, depth
4656    mux_file = open(file_in, 'rb')
4657
4658    # Number of points/stations
4659    (points_num,) = unpack('i', mux_file.read(4))
4660
4661    # nt, int - Number of time steps
4662    (time_step_count,) = unpack('i', mux_file.read(4))
4663
4664    #dt, float - time step, seconds
4665    (time_step,) = unpack('f', mux_file.read(4))
4666
4667    msg = "Bad data in the mux file."
4668    if points_num < 0:
4669        mux_file.close()
4670        raise ANUGAError, msg
4671    if time_step_count < 0:
4672        mux_file.close()
4673        raise ANUGAError, msg
4674    if time_step < 0:
4675        mux_file.close()
4676        raise ANUGAError, msg
4677
4678    lonlatdep = p_array.array('f')
4679    lonlatdep.read(mux_file, columns * points_num)
4680    lonlatdep = num.array(lonlatdep, dtype=num.float)
4681    lonlatdep = num.reshape(lonlatdep, (points_num, columns))
4682
4683    lon, lat, depth = lon_lat2grid(lonlatdep)
4684    lon_sorted = list(lon)
4685    lon_sorted.sort()
4686
4687    if not num.alltrue(lon == lon_sorted):
4688        msg = "Longitudes in mux file are not in ascending order"
4689        raise IOError, msg
4690
    lat_sorted = list(lat)
    lat_sorted.sort()

    if not num.alltrue(lat == lat_sorted):
        msg = "Latitudes in mux file are not in ascending order"
        raise IOError, msg
4693
4694    nc_file = Write_nc(quantity,
4695                       file_out,
4696                       time_step_count,
4697                       time_step,
4698                       lon,
4699                       lat)
4700
4701    for i in range(time_step_count):
4702        #Read in a time slice from mux file
4703        hz_p_array = p_array.array('f')
4704        hz_p_array.read(mux_file, points_num)
4705        hz_p = num.array(hz_p_array, dtype=num.float)
4706        hz_p = num.reshape(hz_p, (len(lon), len(lat)))
4707        hz_p = num.transpose(hz_p)  # mux has lat varying fastest, nc has long v.f.
4708
4709        #write time slice to nc file
4710        nc_file.store_timestep(hz_p)
4711
4712    mux_file.close()
4713    nc_file.close()
4714
4715    return lonlatdep, lon, lat, depth
4716
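# The mux header parsed above is three packed values followed by a
# lon/lat/depth table and then the time slices.  A stand-alone sketch of the
# same header parse (assumes a mux file 'o-z-mux' in native byte order, as
# _binary_c2nc itself does):
#
#     from struct import unpack
#     f = open('o-z-mux', 'rb')
#     (points_num,) = unpack('i', f.read(4))       # number of points/stations
#     (time_step_count,) = unpack('i', f.read(4))  # number of time steps
#     (time_step,) = unpack('f', f.read(4))        # time step in seconds
#     # Next: points_num records of (longitude, latitude, depth) floats,
#     # then time_step_count slices of points_num floats each.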
4717
4718##
4719# @brief Write an NC elevation file.
4720# @param file_out Path to the output file.
# @param lon A list/array of longitudes.
# @param lat A list/array of latitudes.
4723# @param depth_vector The elevation data to write.
4724def write_elevation_nc(file_out, lon, lat, depth_vector):
4725    """Write an nc elevation file."""
4726
4727    # NetCDF file definition
4728    outfile = NetCDFFile(file_out, netcdf_mode_w)
4729
4730    #Create new file
4731    nc_lon_lat_header(outfile, lon, lat)
4732
4733    # ELEVATION
4734    zname = 'ELEVATION'
4735    outfile.createVariable(zname, precision, (lat_name, lon_name))
4736    outfile.variables[zname].units = 'CENTIMETERS'
4737    outfile.variables[zname].missing_value = -1.e+034
4738
4739    outfile.variables[lon_name][:] = ensure_numeric(lon)
4740    outfile.variables[lat_name][:] = ensure_numeric(lat)
4741
4742    depth = num.reshape(depth_vector, (len(lat), len(lon)))
4743    outfile.variables[zname][:] = depth
4744
4745    outfile.close()
4746
4747
4748##
4749# @brief Write lat/lon headers to a NetCDF file.
4750# @param outfile Handle to open file to write to.
4751# @param lon An iterable of the longitudes.
4752# @param lat An iterable of the latitudes.
4753# @note Defines lat/long dimensions and variables. Sets various attributes:
4754#          .point_spacing  and  .units
4755#       and writes lat/lon data.
4756
4757def nc_lon_lat_header(outfile, lon, lat):
4758    """Write lat/lon headers to a NetCDF file.
4759
4760    outfile is the netcdf file handle.
4761    lon - a list/array of the longitudes
4762    lat - a list/array of the latitudes
4763    """
4764
4765    outfile.institution = 'Geoscience Australia'
4766    outfile.description = 'Converted from URS binary C'
4767
4768    # Longitude
4769    outfile.createDimension(lon_name, len(lon))
4770    outfile.createVariable(lon_name, precision, (lon_name,))
4771    outfile.variables[lon_name].point_spacing = 'uneven'
4772    outfile.variables[lon_name].units = 'degrees_east'
4773    outfile.variables[lon_name].assignValue(lon)
4774
4775    # Latitude
4776    outfile.createDimension(lat_name, len(lat))
4777    outfile.createVariable(lat_name, precision, (lat_name,))
4778    outfile.variables[lat_name].point_spacing = 'uneven'
4779    outfile.variables[lat_name].units = 'degrees_north'
4780    outfile.variables[lat_name].assignValue(lat)
4781
4782
4783##
# @brief Extract the grid longitudes, latitudes and quantity from point data.
# @param long_lat_dep Array where each row is (longitude, latitude, quantity).
4786# @return A tuple (long, lat, quantity).
4787# @note The latitude is the fastest varying dimension - in mux files.
4788def lon_lat2grid(long_lat_dep):
4789    """
    given a list of points that are assumed to be on a grid,
    return the longs and lats of the grid.
4792    long_lat_dep is an array where each row is a position.
4793    The first column is longitudes.
4794    The second column is latitudes.
4795
4796    The latitude is the fastest varying dimension - in mux files
4797    """
4798
4799    LONG = 0
4800    LAT = 1
4801    QUANTITY = 2
4802
4803    long_lat_dep = ensure_numeric(long_lat_dep, num.float)
4804
4805    num_points = long_lat_dep.shape[0]
4806    this_rows_long = long_lat_dep[0,LONG]
4807
    # Count the number of unique latitudes (rows sharing the first longitude)
    i = 0
    while i < num_points and long_lat_dep[i,LONG] == this_rows_long:
        i += 1
4812
    # Determine the lats and longs from the grid
4814    lat = long_lat_dep[:i, LAT]
4815    long = long_lat_dep[::i, LONG]
4816
4817    lenlong = len(long)
4818    lenlat = len(lat)
4819
4820    msg = 'Input data is not gridded'
4821    assert num_points % lenlat == 0, msg
4822    assert num_points % lenlong == 0, msg
4823
4824    # Test that data is gridded
4825    for i in range(lenlong):
4826        msg = 'Data is not gridded.  It must be for this operation'
4827        first = i * lenlat
4828        last = first + lenlat
4829
4830        assert num.allclose(long_lat_dep[first:last,LAT], lat), msg
4831        assert num.allclose(long_lat_dep[first:last,LONG], long[i]), msg
4832
4833    msg = 'Out of range latitudes/longitudes'
    for l in lat:
        assert -90 < l < 90, msg
    for l in long:
        assert -180 < l < 180, msg
4836
4837    # Changing quantity from lat being the fastest varying dimension to
4838    # long being the fastest varying dimension
4839    # FIXME - make this faster/do this a better way
4840    # use numeric transpose, after reshaping the quantity vector
4841    quantity = num.zeros(num_points, num.float)
4842
4843    for lat_i, _ in enumerate(lat):
4844        for long_i, _ in enumerate(long):
4845            q_index = lat_i*lenlong + long_i
4846            lld_index = long_i*lenlat + lat_i
4847            temp = long_lat_dep[lld_index, QUANTITY]
4848            quantity[q_index] = temp
4849
4850    return long, lat, quantity
4851
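# The double loop above that reorders 'quantity' is equivalent to a reshape
# followed by a transpose, as the FIXME suggests.  A sketch of the faster
# form, using the names from lon_lat2grid (an untested assumption, but it
# preserves the same index mapping):
#
#     grid = num.reshape(long_lat_dep[:, QUANTITY], (lenlong, lenlat))
#     quantity = num.transpose(grid).flatten()  # long now varies fastest
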
4852################################################################################
4853# END URS 2 SWW
4854################################################################################
4855
4856################################################################################
4857# URS UNGRIDDED 2 SWW
4858################################################################################
4859
4860### PRODUCING THE POINTS NEEDED FILE ###
4861
4862# Ones used for FESA 2007 results
4863#LL_LAT = -50.0
4864#LL_LONG = 80.0
4865#GRID_SPACING = 1.0/60.0
4866#LAT_AMOUNT = 4800
4867#LONG_AMOUNT = 3600
4868
4869
4870##
# @brief Write a file of the boundary points needed to replicate the URS grid.
# @param file_name Path of the .urs file to produce.
# @param boundary_polygon A list of points (UTM) describing a polygon.
# @param zone UTM zone of the polygon points.
# @param ll_lat Lower left latitude, in decimal degrees.
# @param ll_long Lower left longitude, in decimal degrees.
# @param grid_spacing Grid spacing in decimal degrees.
# @param lat_amount Number of latitudes.
# @param long_amount Number of longitudes.
# @param isSouthernHemisphere True if the data is in the southern hemisphere.
# @param export_csv True if a .csv file is also to be written.
# @param use_cache True if caching is to be used.
# @param verbose True if this function is to be verbose.
# @return A Geospatial_data instance of the points.
4885def URS_points_needed_to_file(file_name, boundary_polygon, zone,
4886                              ll_lat, ll_long,
4887                              grid_spacing,
4888                              lat_amount, long_amount,
4889                              isSouthernHemisphere=True,
4890                              export_csv=False, use_cache=False,
4891                              verbose=False):
    """
    Given the info to replicate the URS grid and a polygon, output
    a file that specifies the cloud of boundary points for URS.

    This creates a .urs file.  This is in the format used by URS;
    1st line is the number of points,
    each line after represents a point, in lats and longs.

    Note: The polygon cannot cross zones or hemispheres.

    A work-around for different zones or hemispheres is to run this twice,
    once for each zone, and then combine the output.

    file_name - name of the urs file produced for David.
    boundary_polygon - a list of points that describes a polygon.
                      The last point is assumed to join the first point.
                      This is in UTM (lat long would be better though)

    This is info about the URS model that needs to be inputted.

    ll_lat - lower left latitude, in decimal degrees
    ll_long - lower left longitude, in decimal degrees
    grid_spacing - in decimal degrees
    lat_amount - number of latitudes
    long_amount - number of longitudes

    Don't add the file extension.  It will be added.
    """
4920
4921    geo = URS_points_needed(boundary_polygon, zone, ll_lat, ll_long,
4922                            grid_spacing,
4923                            lat_amount, long_amount, isSouthernHemisphere,
4924                            use_cache, verbose)
4925
4926    if not file_name[-4:] == ".urs":
4927        file_name += ".urs"
4928
4929    geo.export_points_file(file_name, isSouthHemisphere=isSouthernHemisphere)
4930
4931    if export_csv:
4932        if file_name[-4:] == ".urs":
4933            file_name = file_name[:-4] + ".csv"
4934        geo.export_points_file(file_name)
4935
4936    return geo
4937
4938
4939##
# @brief Get the points needed to replicate the URS grid within a polygon.
# @param boundary_polygon A list of points (UTM) describing a polygon.
# @param zone UTM zone of the polygon points.
# @param ll_lat Lower left latitude, in decimal degrees.
# @param ll_long Lower left longitude, in decimal degrees.
# @param grid_spacing Grid spacing in decimal degrees.
# @param lat_amount Number of latitudes.
# @param long_amount Number of longitudes.
# @param isSouthHemisphere True if the data is in the southern hemisphere.
# @param use_cache True if caching is to be used.
# @param verbose True if this function is to be verbose.
4951def URS_points_needed(boundary_polygon, zone, ll_lat,
4952                      ll_long, grid_spacing,
4953                      lat_amount, long_amount, isSouthHemisphere=True,
4954                      use_cache=False, verbose=False):
4955    args = (boundary_polygon,
4956            zone, ll_lat,
4957            ll_long, grid_spacing,
4958            lat_amount, long_amount, isSouthHemisphere)
4959    kwargs = {}
4960
    if use_cache is True:
        try:
            from anuga.caching import cache
        except ImportError:
            msg = 'Caching was requested, but caching module ' \
                  'could not be imported'
            raise Exception, msg
4968
4969        geo = cache(_URS_points_needed,
4970                    args, kwargs,
4971                    verbose=verbose,
4972                    compression=False)
4973    else:
        geo = _URS_points_needed(*args, **kwargs)
4975
4976    return geo
4977
4978
4979##
# @brief Worker function for URS_points_needed().
4981# @param boundary_polygon An iterable of points that describe a polygon.
4982# @param zone
4983# @param ll_lat Lower left latitude, in decimal degrees
4984# @param ll_long Lower left longitude, in decimal degrees
4985# @param grid_spacing Grid spacing in decimal degrees.
4986# @param lat_amount
4987# @param long_amount
4988# @param isSouthHemisphere
4989def _URS_points_needed(boundary_polygon,
4990                       zone, ll_lat,
4991                       ll_long, grid_spacing,
4992                       lat_amount, long_amount,
4993                       isSouthHemisphere):
4994    """
4995    boundary_polygon - a list of points that describes a polygon.
                      The last point is assumed to join the first point.
                      This is in UTM (lat long would be better though)

    ll_lat - lower left latitude, in decimal degrees
    ll_long - lower left longitude, in decimal degrees
5001    grid_spacing - in decimal degrees
5002
5003    """
5004
5005    msg = "grid_spacing can not be zero"
5006    assert not grid_spacing == 0, msg
5007
5008    a = boundary_polygon
5009
5010    # List of segments.  Each segment is two points.
    segs = [[a[i-1], a[i]] for i in range(len(a))]
5012
5013    # convert the segs to Lat's and longs.
5014    # Don't assume the zone of the segments is the same as the lower left
5015    # corner of the lat long data!!  They can easily be in different zones
5016    lat_long_set = frozenset()
5017    for seg in segs:
5018        points_lat_long = points_needed(seg, ll_lat, ll_long, grid_spacing,
5019                                        lat_amount, long_amount, zone,
5020                                        isSouthHemisphere)
5021        lat_long_set |= frozenset(points_lat_long)
5022
5023    if lat_long_set == frozenset([]):
5024        msg = "URS region specified and polygon does not overlap."
5025        raise ValueError, msg
5026
5027    # Warning there is no info in geospatial saying the hemisphere of
5028    # these points.  There should be.
5029    geo = Geospatial_data(data_points=list(lat_long_set),
5030                          points_are_lats_longs=True)
5031
5032    return geo
5033
5034
5035##
# @brief Get the points needed to interpolate any point on a segment.
5037# @param seg Two points in the UTM.
5038# @param ll_lat Lower left latitude, in decimal degrees
5039# @param ll_long Lower left longitude, in decimal degrees
5040# @param grid_spacing
5041# @param lat_amount
5042# @param long_amount
5043# @param zone
5044# @param isSouthHemisphere
5045# @return A list of points.
5046def points_needed(seg, ll_lat, ll_long, grid_spacing,
5047                  lat_amount, long_amount, zone,
5048                  isSouthHemisphere):
5049    """
5050    seg is two points, in UTM
5051    return a list of the points, in lats and longs that are needed to
5052    interpolate any point on the segment.
5053    """
5054
5055    from math import sqrt
5056
5057    geo_reference = Geo_reference(zone=zone)
5058    geo = Geospatial_data(seg, geo_reference=geo_reference)
5059    seg_lat_long = geo.get_data_points(as_lat_long=True,
5060                                       isSouthHemisphere=isSouthHemisphere)
5061
5062    # 1.415 = 2^0.5, rounded up....
5063    sqrt_2_rounded_up = 1.415
5064    buffer = sqrt_2_rounded_up * grid_spacing
5065
5066    max_lat = max(seg_lat_long[0][0], seg_lat_long[1][0]) + buffer
5067    max_long = max(seg_lat_long[0][1], seg_lat_long[1][1]) + buffer
5068    min_lat = min(seg_lat_long[0][0], seg_lat_long[1][0]) - buffer
5069    min_long = min(seg_lat_long[0][1], seg_lat_long[1][1]) - buffer
5070
5071    first_row = (min_long - ll_long) / grid_spacing
5072
5073    # To round up
5074    first_row_long = int(round(first_row + 0.5))
5075
5076    last_row = (max_long - ll_long) / grid_spacing # round down
5077    last_row_long = int(round(last_row))
5078
5079    first_row = (min_lat - ll_lat) / grid_spacing
5080    # To round up
5081    first_row_lat = int(round(first_row + 0.5))
5082
5083    last_row = (max_lat - ll_lat) / grid_spacing # round down
5084    last_row_lat = int(round(last_row))
5085
    # 157147.4112 = sqrt(2) * 111120, i.e. the length in metres of the
    # diagonal of a grid cell (1 degree of latitude ~= 111.12 km)
    max_distance = 157147.4112 * grid_spacing
5087    points_lat_long = []
5088
5089    # Create a list of the lat long points to include.
5090    for index_lat in range(first_row_lat, last_row_lat + 1):
5091        for index_long in range(first_row_long, last_row_long + 1):
5092            lat = ll_lat + index_lat*grid_spacing
5093            long = ll_long + index_long*grid_spacing
5094
5095            #filter here to keep good points
5096            if keep_point(lat, long, seg, max_distance):
5097                points_lat_long.append((lat, long)) #must be hashable
5098
    # Points too far from the segment have already been filtered out above
5100    return points_lat_long
5101
5102
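# A worked example of the index arithmetic above (illustrative values):
# with ll_long = 80.0 and grid_spacing = 1.0/60.0, a buffered min_long of
# 80.03 gives first_row = (80.03 - 80.0) / (1.0/60.0) = 1.8 and
# int(round(1.8 + 0.5)) = 2, i.e. the first grid column at or east of the
# buffered minimum.  The sqrt(2)*grid_spacing buffer ensures the retained
# grid points can interpolate any point on the segment.
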
5103##
# @brief Determine whether a point is within max_distance of a segment.
5105# @param lat
5106# @param long
5107# @param seg Two points in UTM.
5108# @param max_distance
5109def keep_point(lat, long, seg, max_distance):
5110    """
5111    seg is two points, UTM
5112    """
5113
5114    from math import sqrt
5115
5116    _ , x0, y0 = redfearn(lat, long)
5117    x1 = seg[0][0]
5118    y1 = seg[0][1]
5119    x2 = seg[1][0]
5120    y2 = seg[1][1]
5121    x2_1 = x2-x1
5122    y2_1 = y2-y1
5123    try:
5124        d = abs((x2_1)*(y1-y0)-(x1-x0)*(y2_1))/sqrt( \
5125            (x2_1)*(x2_1)+(y2_1)*(y2_1))
5126    except ZeroDivisionError:
5127        if sqrt((x2_1)*(x2_1)+(y2_1)*(y2_1)) == 0 \
5128           and abs((x2_1)*(y1-y0)-(x1-x0)*(y2_1)) == 0:
5129            return True
5130        else:
5131            return False
5132
5133    return d <= max_distance
5134
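# The distance 'd' computed above is the standard perpendicular distance
# from the point (x0, y0) to the infinite line through (x1, y1) and (x2, y2):
#
#     d = |(x2-x1)*(y1-y0) - (x1-x0)*(y2-y1)| / sqrt((x2-x1)**2 + (y2-y1)**2)
#
# For example, the point (1, 1) against the segment (0, 0)-(2, 0) gives
# d = |2*(0-1) - (0-1)*0| / sqrt(4) = 2/2 = 1, as expected.
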
5135################################################################################
5136# CONVERTING UNGRIDDED URS DATA TO AN SWW FILE
5137################################################################################
5138
5139WAVEHEIGHT_MUX_LABEL = '-z-mux'
5140EAST_VELOCITY_LABEL =  '-e-mux'
5141NORTH_VELOCITY_LABEL =  '-n-mux'
5142
5143##
5144# @brief Convert URS file(s) (wave prop) to an SWW file.
5145# @param basename_in Stem of the input filenames.
5146# @param basename_out Path to the output SWW file.
5147# @param verbose True if this function is to be verbose.
# @param mint Minimum time of interest (seconds). If omitted, use all times.
# @param maxt Maximum time of interest (seconds). If omitted, use all times.
# @param mean_stage Mean stage added to the stage values.
5151# @param origin Tuple with geo-ref UTM coordinates (zone, easting, northing).
# @param hole_points_UTM UTM coordinates of a point inside a hole region.
# @param zscale Scale factor applied to the wave height data.
5154# @note Also convert latitude and longitude to UTM. All coordinates are
5155#       assumed to be given in the GDA94 datum.
5156# @note Input filename stem has suffixes '-z-mux', '-e-mux' and '-n-mux'
5157#       added for relative height, x-velocity and y-velocity respectively.
5158def urs_ungridded2sww(basename_in='o', basename_out=None, verbose=False,
5159                      mint=None, maxt=None,
5160                      mean_stage=0,
5161                      origin=None,
5162                      hole_points_UTM=None,
5163                      zscale=1):
5164    """
5165    Convert URS C binary format for wave propagation to
5166    sww format native to abstract_2d_finite_volumes.
5167
5168    Specify only basename_in and read files of the form
5169    basefilename-z-mux, basefilename-e-mux and
5170    basefilename-n-mux containing relative height,
5171    x-velocity and y-velocity, respectively.
5172
    Also convert latitude and longitude to UTM. All coordinates are
    assumed to be given in the GDA94 datum. The latitude and longitude
    information is assumed to be ungridded.

    min's and max's: If omitted - full extent is used.
    To include a value, min and max may equal it.
    Lat and lon are assumed to be in decimal degrees.
5180
5181    origin is a 3-tuple with geo referenced
5182    UTM coordinates (zone, easting, northing)
5183    It will be the origin of the sww file. This shouldn't be used,
    since all of anuga should be able to handle an arbitrary origin.
5185    The mux point info is NOT relative to this origin.
5186
5187    URS C binary format has data organised as TIME, LONGITUDE, LATITUDE
5188    which means that latitude is the fastest
5189    varying dimension (row major order, so to speak)
5190
    In URS C binary the latitudes and longitudes are in ascending order.
5192
5193    Note, interpolations of the resulting sww file will be different
5194    from results of urs2sww.  This is due to the interpolation
5195    function used, and the different grid structure between urs2sww
5196    and this function.
5197
5198    Interpolating data that has an underlying gridded source can
5199    easily end up with different values, depending on the underlying
5200    mesh.
5201
5202    consider these 4 points
5203    50  -50
5204
5205    0     0
5206
5207    The grid can be
5208     -
5209    |\|   A
5210     -
5211     or;
5212      -
5213     |/|  B
5214      -
5215
    If a point is just below the centre of the square, it will have a
    +ve value in grid A and a -ve value in grid B.
5218    """
5219
5220    from anuga.mesh_engine.mesh_engine import NoTrianglesError
5221    from anuga.pmesh.mesh import Mesh
5222
5223    files_in = [basename_in + WAVEHEIGHT_MUX_LABEL,
5224                basename_in + EAST_VELOCITY_LABEL,
5225                basename_in + NORTH_VELOCITY_LABEL]
5226    quantities = ['HA','UA','VA']
5227
    # Instantiate Urs_points for each of the three mux files.
    mux = {}
    for quantity, file in zip(quantities, files_in):
        mux[quantity] = Urs_points(file)
5232
5233    # Could check that the depth is the same. (hashing)
5234
5235    # handle to a mux file to do depth stuff
5236    a_mux = mux[quantities[0]]
5237
5238    # Convert to utm
5239    lat = a_mux.lonlatdep[:,1]
5240    long = a_mux.lonlatdep[:,0]
5241    points_utm, zone = convert_from_latlon_to_utm(latitudes=lat,
5242                                                  longitudes=long)
5243
5244    elevation = a_mux.lonlatdep[:,2] * -1
5245
5246    # grid (create a mesh from the selected points)
    # This mesh has a problem.  Triangles are stretched over ungridded areas.
5248    # If these areas could be described as holes in pmesh, that would be great.
5249
    # I can't just get the user to select a point in the middle.
5251    # A boundary is needed around these points.
5252    # But if the zone of points is obvious enough auto-segment should do
5253    # a good boundary.
5254    mesh = Mesh()
5255    mesh.add_vertices(points_utm)
5256    mesh.auto_segment(smooth_indents=True, expand_pinch=True)
5257
5258    # To try and avoid alpha shape 'hugging' too much
5259    mesh.auto_segment(mesh.shape.get_alpha() * 1.1)
5260    if hole_points_UTM is not None:
5261        point = ensure_absolute(hole_points_UTM)
5262        mesh.add_hole(point[0], point[1])
5263
5264    try:
5265        mesh.generate_mesh(minimum_triangle_angle=0.0, verbose=False)
5266    except NoTrianglesError:
5267        # This is a bit of a hack, going in and changing the data structure.
5268        mesh.holes = []
5269        mesh.generate_mesh(minimum_triangle_angle=0.0, verbose=False)
5270
5271    mesh_dic = mesh.Mesh2MeshList()
5272
5273    #mesh.export_mesh_file(basename_in + '_168.tsh')
5274    #import sys; sys.exit()
5275    # These are the times of the mux file
5276    mux_times = []
5277    for i in range(a_mux.time_step_count):
5278        mux_times.append(a_mux.time_step * i)
5279    (mux_times_start_i, mux_times_fin_i) = mux2sww_time(mux_times, mint, maxt)
5280    times = mux_times[mux_times_start_i:mux_times_fin_i]
5281
5282    if mux_times_start_i == mux_times_fin_i:
5283        # Close the mux files
        for quantity, file in zip(quantities, files_in):
5285            mux[quantity].close()
5286        msg = "Due to mint and maxt there's no time info in the boundary SWW."
5287        raise Exception, msg
5288
    # If this raise is removed there are currently no downstream errors
5290
    points_utm = ensure_numeric(points_utm)
5292    assert num.alltrue(ensure_numeric(mesh_dic['generatedpointlist'])
5293                       == ensure_numeric(points_utm))
5294
5295    volumes = mesh_dic['generatedtrianglelist']
5296
5297    # write sww intro and grid stuff.
5298    if basename_out is None:
5299        swwname = basename_in + '.sww'
5300    else:
5301        swwname = basename_out + '.sww'
5302
5303    if verbose: print 'Output to ', swwname
5304
5305    outfile = NetCDFFile(swwname, netcdf_mode_w)
5306
5307    # For a different way of doing this, check out tsh2sww
5308    # work out sww_times and the index range this covers
5309    sww = Write_sww()
5310    sww.store_header(outfile, times, len(volumes), len(points_utm),
5311                     verbose=verbose, sww_precision=netcdf_float)
5312    outfile.mean_stage = mean_stage
5313    outfile.zscale = zscale
5314
5315    sww.store_triangulation(outfile, points_utm, volumes,
5316                            elevation, zone,  new_origin=origin,
5317                            verbose=verbose)
5318
5319    if verbose: print 'Converting quantities'
5320
5321    # Read in a time slice from each mux file and write it to the SWW file
5322    j = 0
    for ha, ua, va in zip(mux['HA'], mux['UA'], mux['VA']):
5324        if j >= mux_times_start_i and j < mux_times_fin_i:
5325            stage = zscale*ha + mean_stage
5326            h = stage - elevation
5327            xmomentum = ua*h
5328            ymomentum = -1 * va * h # -1 since in mux files south is positive.
5329            sww.store_quantities(outfile,
5330                                 slice_index=j-mux_times_start_i,
5331                                 verbose=verbose,
5332                                 stage=stage,
5333                                 xmomentum=xmomentum,
5334                                 ymomentum=ymomentum,
5335                                 sww_precision=num.float)
5336        j += 1
5337
5338    if verbose: sww.verbose_quantities(outfile)
5339
5340    outfile.close()
5341
5342    # Do some conversions while writing the sww file
5344
5345################################################################################
5346# READ MUX2 FILES line of points
5347################################################################################
5348
5349WAVEHEIGHT_MUX2_LABEL = '-z-mux2'
5350EAST_VELOCITY_MUX2_LABEL = '-e-mux2'
5351NORTH_VELOCITY_MUX2_LABEL = '-n-mux2'
5352
5353##
# @brief Read a list of mux2 files and combine them as a weighted sum.
5355# @param filenames List of mux2 format input filenames.
5356# @param weights Weights associated with each source file.
5357# @param permutation The gauge numbers for which data is extracted.
5358# @param verbose True if this function is to be verbose.
5359# @return (times, latitudes, longitudes, elevation, quantity, starttime)
5360def read_mux2_py(filenames,
5361                 weights=None,
5362                 permutation=None,
5363                 verbose=False):
    """Access the mux files specified in the filenames list. Combine the
       data found therein as a weighted linear sum as specified by the weights.
       If permutation is None or empty, extract timeseries data for all gauges
       within the files.

       Input:
           filenames:   List of filenames specifying the file containing the
                        timeseries data (mux2 format) for each source
           weights:     Weights associated with each source
                        (defaults to 1 for each source)
           permutation: Specifies the gauge numbers for which data is to be
                        extracted
    """
5377
5378    from urs_ext import read_mux2
5379
5380    numSrc = len(filenames)
5381
5382    file_params = -1 * num.ones(3, num.float)                    # [nsta,dt,nt]
5383
5384    # Convert verbose to int C flag
5385    if verbose:
5386        verbose=1
5387    else:
5388        verbose=0
5389
5390    if weights is None:
5391        weights = num.ones(numSrc, num.int)     #array default#
5392
5393    if permutation is None:
5394        permutation = ensure_numeric([], num.float)
5395
    # Call the underlying C implementation (urs_ext)
5397    data = read_mux2(numSrc, filenames, weights, file_params,
5398                     permutation, verbose)
5399
5400    msg = 'File parameter values were not read in correctly from c file'
5401    assert len(num.compress(file_params > 0, file_params)) != 0, msg
5402
    msg = 'The number of stations specified in the c array and in the file ' \
5404          'are inconsistent'
5405    assert file_params[0] >= len(permutation), msg
5406
5407    msg = 'The number of stations returned is inconsistent with ' \
5408          'the requested number'
5409    assert len(permutation) == 0 or len(permutation) == data.shape[0], msg
5410
5411    nsta = int(file_params[0])
5412    msg = 'Must have at least one station'
5413    assert nsta > 0, msg
5414
5415    dt = file_params[1]
    msg = 'Must have a positive timestep'
5417    assert dt > 0, msg
5418
5419    nt = int(file_params[2])
5420    msg = 'Must have at least one gauge value'
5421    assert nt > 0, msg
5422
5423    OFFSET = 5 # Number of site parameters p passed back with data
5424               # p = [geolat,geolon,depth,start_tstep,finish_tstep]
5425
5426    # FIXME (Ole): What is the relationship with params and data.shape ?
5427    # It looks as if the following asserts should pass but they don't always
5428    #
5429    #msg = 'nt = %d, data.shape[1] == %d' %(nt, data.shape[1])
5430    #assert nt == data.shape[1] - OFFSET, msg
5431    #
5432    #msg = 'nsta = %d, data.shape[0] == %d' %(nsta, data.shape[0])
5433    #assert nsta == data.shape[0], msg
5434
5435    # Number of stations in ordering file
5436    number_of_selected_stations = data.shape[0]
5437
5438    # Index where data ends and parameters begin
5439    parameters_index = data.shape[1] - OFFSET
5440
5441    times = dt * num.arange(parameters_index)
5442    latitudes = num.zeros(number_of_selected_stations, num.float)
5443    longitudes = num.zeros(number_of_selected_stations, num.float)
5444    elevation = num.zeros(number_of_selected_stations, num.float)
5445    quantity = num.zeros((number_of_selected_stations, parameters_index), num.float)
5446
5447    starttime = 1e16
5448    for i in range(number_of_selected_stations):
5449        quantity[i][:] = data[i][:parameters_index]
5450        latitudes[i] = data[i][parameters_index]
5451        longitudes[i] = data[i][parameters_index+1]
5452        elevation[i] = -data[i][parameters_index+2]
5453        first_time_step = data[i][parameters_index+3]
5454        starttime = min(dt*first_time_step, starttime)
5455
5456    return times, latitudes, longitudes, elevation, quantity, starttime
5457
5458
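# Layout of each row returned by the C extension, as unpacked above: the
# first (data.shape[1] - OFFSET) entries are the timeseries, followed by the
# five site parameters.  A sketch of decoding a single row i:
#
#     row = data[i]
#     series = row[:-5]  # the quantity timeseries (OFFSET == 5)
#     geolat, geolon, depth, start_tstep, finish_tstep = row[-5:]
#     # elevation is stored as -depth, as in the loop above
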
5459##
# @brief Get indices into mux_times covering the interval [mint, maxt].
# @param mux_times Array of time values from the mux file.
# @param mint Minimum time of interest (None means from the start).
# @param maxt Maximum time of interest (None means to the end).
# @return A tuple (start_index, finish_index).
5465def mux2sww_time(mux_times, mint, maxt):
    """Return the start and finish indices into mux_times bounded by mint and maxt."""

    if mint is None:
5470        mux_times_start_i = 0
5471    else:
5472        mux_times_start_i = num.searchsorted(mux_times, mint)
5473
    if maxt is None:
5475        mux_times_fin_i = len(mux_times)
5476    else:
5477        maxt += 0.5 # so if you specify a time where there is
5478                    # data that time will be included
5479        mux_times_fin_i = num.searchsorted(mux_times, maxt)
5480
5481    return mux_times_start_i, mux_times_fin_i
5482
5483
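# Example of the clipping behaviour (a sketch with illustrative times):
#
#     mux_times = [0.0, 10.0, 20.0, 30.0]
#     mux2sww_time(mux_times, mint=10.0, maxt=20.0)  # -> (1, 3)
#
# The half-open index range [1, 3) keeps the 10.0 and 20.0 slices; nudging
# maxt by 0.5 seconds ensures an exactly-specified time is included.
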
5484##
5485# @brief Convert a URS (mux2, wave propagation) file to an STS file.
5486# @param basename_in String (or list) of source file stems.
5487# @param basename_out Stem of output STS file path.
5488# @param weights
5489# @param verbose True if this function is to be verbose.
# @param origin Tuple with geo-ref UTM coords (zone, easting, northing).
5491# @param zone
5492# @param mean_stage
5493# @param zscale
5494# @param ordering_filename Path of a file specifying which mux2 gauge points are
5495#                          to be stored.
5496# @note Also convert latitude and longitude to UTM. All coordinates are
5497#       assumed to be given in the GDA94 datum.
5498def urs2sts(basename_in, basename_out=None,
5499            weights=None,
5500            verbose=False,
5501            origin=None,
5502            zone=None,
5503            central_meridian=None,           
5504            mean_stage=0.0,
5505            zscale=1.0,
5506            ordering_filename=None):
5507    """Convert URS mux2 format for wave propagation to sts format
5508
5509    Also convert latitude and longitude to UTM. All coordinates are
5510    assumed to be given in the GDA94 datum
5511
5512    origin is a 3-tuple with geo referenced
5513    UTM coordinates (zone, easting, northing)
5514
5515    inputs:
5516
5517    basename_in: list of source file prefixes
5518
5519        These are combined with the extensions:
5520        WAVEHEIGHT_MUX2_LABEL = '-z-mux2' for stage
5521        EAST_VELOCITY_MUX2_LABEL = '-e-mux2' xmomentum
5522        NORTH_VELOCITY_MUX2_LABEL = '-n-mux2' and ymomentum
5523
5524        to create a 2D list of mux2 file. The rows are associated with each
5525        quantity and must have the above extensions
5526        the columns are the list of file prefixes.
5527
    ordering_filename: a .txt file specifying which mux2 gauge points are
5529              to be stored. This is indicated by the index of the gauge
5530              in the ordering file.
5531
5532              ordering file format:
5533              1st line:    'index,longitude,latitude\n'
5534              other lines: index,longitude,latitude
5535
              If ordering_filename is None or the file is empty then
              all points are taken in the order they
              appear in the mux2 file.
5539
5540
5541    output:
5542      basename_out: name of sts file in which mux2 data is stored.

    NOTE: South is positive in mux files so the sign of the y-component of
    velocity is reversed.
5547    """
5548
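    # An ordering file, as described in the docstring above, might look
    # like this (hypothetical gauge indices and coordinates):
    #
    #     index, longitude, latitude
    #     0, 151.00, -34.00
    #     2, 151.05, -34.02
    #     1, 151.10, -34.05
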
5549    import os
5550    from Scientific.IO.NetCDF import NetCDFFile
    from types import ListType, StringType
5552    from operator import __and__
5553
5554    if not isinstance(basename_in, ListType):
5555        if verbose: print 'Reading single source'
5556        basename_in = [basename_in]
5557
5558    # This is the value used in the mux file format to indicate NAN data
5559    # FIXME (Ole): This should be changed everywhere to IEEE NAN when
5560    #              we upgrade to Numpy
5561    NODATA = 99
5562
    # Check that basename_in is a list of strings
    if not reduce(__and__,
                  map(lambda z: isinstance(z, StringType), basename_in)):
        msg = 'basename_in must be a string or list of strings'
        raise Exception, msg
5567
5568    # Find the number of sources to be used
5569    numSrc = len(basename_in)
5570
5571    # A weight must be specified for each source
5572    if weights is None:
5573        # Default is equal weighting
5574        weights = num.ones(numSrc, num.float) / numSrc
5575    else:
5576        weights = ensure_numeric(weights)
5577        msg = 'When combining multiple sources must specify a weight for ' \
5578              'mux2 source file'
5579        assert len(weights) == numSrc, msg
5580
5581    if verbose: print 'Weights used in urs2sts:', weights
5582
5583    # Check output filename
5584    if basename_out is None:
5585        msg = 'STS filename must be specified as basename_out ' \
5586              'in function urs2sts'
5587        raise Exception, msg
5588
5589    if basename_out.endswith('.sts'):
5590        stsname = basename_out
5591    else:
5592        stsname = basename_out + '.sts'
5593
5594    # Create input filenames from basenames and check their existence
5595    files_in = [[], [], []]
5596    for files in basename_in:
        files_in[0].append(files + WAVEHEIGHT_MUX2_LABEL)
5598        files_in[1].append(files + EAST_VELOCITY_MUX2_LABEL)
5599        files_in[2].append(files + NORTH_VELOCITY_MUX2_LABEL)
5600
5601    quantities = ['HA','UA','VA'] # Quantity names used in the MUX2 format
5602    for i in range(len(quantities)):
5603        for file_in in files_in[i]:
5604            if (os.access(file_in, os.R_OK) == 0):
5605                msg = 'File %s does not exist or is not accessible' % file_in
5606                raise IOError, msg
5607
5608    # Establish permutation array
5609    if ordering_filename is not None:
5610        if verbose is True: print 'Reading ordering file', ordering_filename
5611
5612        # Read ordering file
5613        try:
5614            fid = open(ordering_filename, 'r')
5615            file_header = fid.readline().split(',')
5616            ordering_lines = fid.readlines()
5617            fid.close()
5618        except:
5619            msg = 'Cannot open %s' % ordering_filename
5620            raise Exception, msg
5621
5622        reference_header = 'index, longitude, latitude\n'
5623        reference_header_split = reference_header.split(',')
5624        for i in range(3):
5625            if not file_header[i].strip() == reference_header_split[i].strip():
5626                msg = 'File must contain header: ' + reference_header
5627                raise Exception, msg
5628
5629        if len(ordering_lines) < 2:
5630            msg = 'File must contain at least two points'
5631            raise Exception, msg
5632
5633        permutation = [int(line.split(',')[0]) for line in ordering_lines]
5634        permutation = ensure_numeric(permutation)
5635    else:
5636        permutation = None
5637
5638    # Read MUX2 files
    if verbose: print 'Reading mux2 files'

    mux = {}
5642    for i, quantity in enumerate(quantities):
5643        # For each quantity read the associated list of source mux2 file with
        # extension associated with that quantity
5645
5646        times, latitudes, longitudes, elevation, mux[quantity], starttime \
5647            = read_mux2_py(files_in[i], weights, permutation, verbose=verbose)
5648
5649        # Check that all quantities have consistent time and space information
5650        if quantity != quantities[0]:
5651            msg = '%s, %s and %s have inconsistent gauge data' \
5652                  % (files_in[0], files_in[1], files_in[2])
5653            assert num.allclose(times, times_old), msg
5654            assert num.allclose(latitudes, latitudes_old), msg
5655            assert num.allclose(longitudes, longitudes_old), msg
5656            assert num.allclose(elevation, elevation_old), msg
5657            assert num.allclose(starttime, starttime_old), msg
5658        times_old = times
5659        latitudes_old = latitudes
5660        longitudes_old = longitudes
5661        elevation_old = elevation
5662        starttime_old = starttime
5663
5664        # Self check - can be removed to improve speed
5665        #ref_longitudes = [float(line.split(',')[1]) for line in ordering_lines]
5666        #ref_latitudes = [float(line.split(',')[2]) for line in ordering_lines]
5667        #
5668        #msg = 'Longitudes specified in ordering file do not match those ' \
5669        #      'found in mux files. ' \
5670        #      'I got %s instead of %s (only beginning shown)' \
5671        #      % (str(longitudes[:10]) + '...',
5672        #         str(ref_longitudes[:10]) + '...')
5673        #assert allclose(longitudes, ref_longitudes), msg
5674        #
5675        #msg = 'Latitudes specified in ordering file do not match those ' \
5676        #      'found in mux files. '
5677        #      'I got %s instead of %s (only beginning shown)' \
5678        #      % (str(latitudes[:10]) + '...',
5679        #         str(ref_latitudes[:10]) + '...')
5680        #assert allclose(latitudes, ref_latitudes), msg
5681
5682    # Store timeseries in STS file
    msg = 'File is empty and/or clipped region not in file region'
    assert len(latitudes) > 0, msg
5685
5686    number_of_points = latitudes.shape[0]      # Number of stations retrieved
5687    number_of_times = times.shape[0]           # Number of timesteps
5688    number_of_latitudes = latitudes.shape[0]   # Number latitudes
5689    number_of_longitudes = longitudes.shape[0] # Number longitudes
5690
    # The permutation vector contains the original indices
5692    # as given in ordering file or None in which case points
5693    # are assigned the trivial indices enumerating them from
5694    # 0 to number_of_points-1
5695    if permutation is None:
5696        permutation = num.arange(number_of_points, dtype=num.int)
5697
5698    # NetCDF file definition
5699    outfile = NetCDFFile(stsname, netcdf_mode_w)
5700
5701    description = 'Converted from URS mux2 files: %s' % basename_in
5702
5703    # Create new file
5704    sts = Write_sts()
5705    sts.store_header(outfile,
5706                     times+starttime,
5707                     number_of_points,
5708                     description=description,
5709                     verbose=verbose,
5710                     sts_precision=netcdf_float)
5711
5712    # Store
5713    from anuga.coordinate_transforms.redfearn import redfearn
5714
5715    x = num.zeros(number_of_points, num.float)  # Easting
5716    y = num.zeros(number_of_points, num.float)  # Northing
5717
5718    # Check zone boundaries
5719    if zone is None:
5720        refzone, _, _ = redfearn(latitudes[0], longitudes[0],
5721                                 central_meridian=central_meridian)
5722    else:
5723        refzone = zone
5724
5725    old_zone = refzone
5726
5727    for i in range(number_of_points):
5728        computed_zone, easting, northing = redfearn(latitudes[i], longitudes[i],
5729                                                    zone=zone,
5730                                                    central_meridian=central_meridian)
5731        x[i] = easting
5732        y[i] = northing
5733        if computed_zone != refzone:
5734            msg = 'All sts gauges need to be in the same zone. \n'
            msg += 'offending gauge: Zone %d, %.4f, %.4f\n' \
                   % (computed_zone, easting, northing)
            msg += 'previous gauge: Zone %d, %.4f, %.4f' \
                   % (old_zone, old_easting, old_northing)
5739            raise Exception, msg
5740        old_zone = computed_zone
5741        old_easting = easting
5742        old_northing = northing
5743
5744    if origin is None:
5745        origin = Geo_reference(refzone, min(x), min(y))
5746    geo_ref = write_NetCDF_georeference(origin, outfile)
5747
5748    elevation = num.resize(elevation, outfile.variables['elevation'][:].shape)
5749    outfile.variables['permutation'][:] = permutation.astype(num.int32) # Opteron 64
5750    outfile.variables['x'][:] = x - geo_ref.get_xllcorner()
5751    outfile.variables['y'][:] = y - geo_ref.get_yllcorner()
5752    outfile.variables['elevation'][:] = elevation
5753
5754    stage = outfile.variables['stage']
5755    xmomentum = outfile.variables['xmomentum']
5756    ymomentum = outfile.variables['ymomentum']
5757
5758    if verbose: print 'Converting quantities'
5759
5760    for j in range(len(times)):
5761        for i in range(number_of_points):
5762            ha = mux['HA'][i,j]
5763            ua = mux['UA'][i,j]
5764            va = mux['VA'][i,j]
5765            if ha == NODATA:
5766                if verbose:
5767                    msg = 'Setting nodata value %d to 0 at time = %f, ' \
5768                          'point = %d' % (ha, times[j], i)
5769                    print msg
5770                ha = 0.0
5771                ua = 0.0
5772                va = 0.0
5773
5774            w = zscale*ha + mean_stage
5775            h = w - elevation[i]
5776            stage[j,i] = w
5777
5778            xmomentum[j,i] = ua * h
5779            ymomentum[j,i] = -va * h # South is positive in mux files
5780
5781
5782    outfile.close()
5783
5784
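# A minimal usage sketch for urs2sts (hedged: the file stems and the
# ordering file are hypothetical and must exist on disk):
#
#     urs2sts(['source_a', 'source_b'],      # reads source_a-z-mux2 etc.
#             basename_out='boundary',       # writes boundary.sts
#             weights=[0.7, 0.3],            # linear combination of sources
#             ordering_filename='ordering.txt',
#             mean_stage=0.0,
#             verbose=True)
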
5785##
5786# @brief Create a list of points defining a boundary from an STS file.
5787# @param stsname Stem of path to the STS file to read.
5788# @return A list of boundary points.
5789def create_sts_boundary(stsname):
5790    """Create a list of points defining a boundary from an STS file.
5791
5792    Create boundary segments from .sts file. Points can be stored in
5793    arbitrary order within the .sts file. The order in which the .sts points
    make up the boundary is given in the order.txt file.
5795
5796    FIXME:
5797    Point coordinates are stored in relative eastings and northings.
5798    But boundary is produced in absolute coordinates
5799    """
5800
    try:
        fid = NetCDFFile(stsname + '.sts', netcdf_mode_r)
    except:
        msg = 'Cannot open %s.sts' % stsname
        raise Exception, msg
5806
5807    xllcorner = fid.xllcorner[0]
5808    yllcorner = fid.yllcorner[0]
5809
5810    #Points stored in sts file are normalised to [xllcorner,yllcorner] but
5811    #we cannot assume that boundary polygon will be. At least the
5812    #additional points specified by the user after this function is called
5813    x = fid.variables['x'][:] + xllcorner
5814    y = fid.variables['y'][:] + yllcorner
5815
5816    x = num.reshape(x, (len(x),1))
5817    y = num.reshape(y, (len(y),1))
5818    sts_points = num.concatenate((x,y), axis=1)
5819
5820    return sts_points.tolist()
5821
5822
5823##
5824# @brief A class to write an SWW file.
5825class Write_sww:
5826    from anuga.shallow_water.shallow_water_domain import Domain
5827
5828    # FIXME (Ole): Hardwiring the conserved quantities like
5829    # this could be a problem. I would prefer taking them from
5830    # the instantiation of Domain.
5831    #
5832    # (DSG) There is not always a Domain instance when Write_sww is used.
5833    # Check to see if this is the same level of hardwiring as is in
    # shallow water domain.
5835
5836    sww_quantities = Domain.conserved_quantities
5837
5838    RANGE = '_range'
5839    EXTREMA = ':extrema'
5840
5841    ##
    # @brief Instantiate the SWW writer class.
5843    def __init__(self):
5844        pass
5845
5846    ##
5847    # @brief Store a header in the SWW file.
5848    # @param outfile Open handle to the file that will be written.
5849    # @param times A list of time slices *or* a start time.
5850    # @param number_of_volumes The number of triangles.
5851    # @param number_of_points The number of points.
5852    # @param description The internal file description string.
5853    # @param smoothing True if smoothing is to be used.
5854    # @param order
5855    # @param sww_precision Data type of the quantity written (netcdf constant)
5856    # @param verbose True if this function is to be verbose.
5857    # @note If 'times' is a list, the info will be made relative.
5858    def store_header(self,
5859                     outfile,
5860                     times,
5861                     number_of_volumes,
5862                     number_of_points,
5863                     description='Converted from XXX',
5864                     smoothing=True,
5865                     order=1,
5866                     sww_precision=netcdf_float32,
5867                     verbose=False):
5868        """Write an SWW file header.
5869
5870        outfile - the open file that will be written
5871        times - A list of the time slice times OR a start time
5872        Note, if a list is given the info will be made relative.
5873        number_of_volumes - the number of triangles
5874        """
5875
5876        outfile.institution = 'Geoscience Australia'
5877        outfile.description = description
5878
5879        # For sww compatibility
5880        if smoothing is True:
            # Smoothing to be deprecated
5882            outfile.smoothing = 'Yes'
5883            outfile.vertices_are_stored_uniquely = 'False'
5884        else:
            # Smoothing to be deprecated
5886            outfile.smoothing = 'No'
5887            outfile.vertices_are_stored_uniquely = 'True'
5888        outfile.order = order
5889
5890        try:
5891            revision_number = get_revision_number()
5892        except:
5893            revision_number = None
5894        # Allow None to be stored as a string
5895        outfile.revision_number = str(revision_number)
5896
        # This is used to separate a single start time (scalar) from
        # a list/array of time slices.
5899        if isinstance(times, (list, num.ndarray)):
5900            number_of_times = len(times)
5901            times = ensure_numeric(times)
5902            if number_of_times == 0:
5903                starttime = 0
5904            else:
5905                starttime = times[0]
5906                times = times - starttime  #Store relative times
5907        else:
5908            number_of_times = 0
5909            starttime = times
5910            #times = ensure_numeric([])
5911
5912        outfile.starttime = starttime
5913
5914        # dimension definitions
5915        outfile.createDimension('number_of_volumes', number_of_volumes)
5916        outfile.createDimension('number_of_vertices', 3)
5917        outfile.createDimension('numbers_in_range', 2)
5918
5919        if smoothing is True:
5920            outfile.createDimension('number_of_points', number_of_points)
5921            # FIXME(Ole): This will cause sww files for parallel domains to
5922            # have ghost nodes stored (but not used by triangles).
5923            # To clean this up, we have to change get_vertex_values and
5924            # friends in quantity.py (but I can't be bothered right now)
5925        else:
5926            outfile.createDimension('number_of_points', 3*number_of_volumes)
5927
5928        outfile.createDimension('number_of_timesteps', number_of_times)
5929
5930        # variable definitions
5931        outfile.createVariable('x', sww_precision, ('number_of_points',))
5932        outfile.createVariable('y', sww_precision, ('number_of_points',))
5933        outfile.createVariable('elevation', sww_precision,
5934                               ('number_of_points',))
5935        q = 'elevation'
5936        outfile.createVariable(q + Write_sww.RANGE, sww_precision,
5937                               ('numbers_in_range',))
5938
5939        # Initialise ranges with small and large sentinels.
5940        # If this was in pure Python we could have used None sensibly
5941        outfile.variables[q+Write_sww.RANGE][0] = max_float  # Min
5942        outfile.variables[q+Write_sww.RANGE][1] = -max_float # Max
5943
5944        # FIXME: Backwards compatibility
5945        outfile.createVariable('z', sww_precision, ('number_of_points',))
5946
5947        outfile.createVariable('volumes', netcdf_int, ('number_of_volumes',
5948                                                       'number_of_vertices'))
5949
5950        # Doing sww_precision instead of Float gives cast errors.
5951        outfile.createVariable('time', netcdf_float,
5952                               ('number_of_timesteps',))
5953
5954        for q in Write_sww.sww_quantities:
5955            outfile.createVariable(q, sww_precision, ('number_of_timesteps',
5956                                                      'number_of_points'))
5957            outfile.createVariable(q + Write_sww.RANGE, sww_precision,
5958                                   ('numbers_in_range',))
5959
5960            # Initialise ranges with small and large sentinels.
5961            # If this was in pure Python we could have used None sensibly
5962            outfile.variables[q+Write_sww.RANGE][0] = max_float  # Min
5963            outfile.variables[q+Write_sww.RANGE][1] = -max_float # Max
5964
5965        if isinstance(times, (list, num.ndarray)):
5966            outfile.variables['time'][:] = times    #Store time relative
5967
5968        if verbose:
5969            print '------------------------------------------------'
5970            print 'Statistics:'
5971            print '    t in [%f, %f], len(t) == %d' \
5972                  % (num.min(times), num.max(times), len(times.flat))
5973
5974    ##
5975    # @brief Store triangulation data in the underlying file.
5976    # @param outfile Open handle to underlying file.
5977    # @param points_utm List or array of points in UTM.
5978    # @param volumes
5979    # @param elevation
5980    # @param zone
5981    # @param new_origin georeference that the points can be set to.
5982    # @param points_georeference The georeference of the points_utm.
5983    # @param verbose True if this function is to be verbose.
5984    def store_triangulation(self,
5985                            outfile,
5986                            points_utm,
5987                            volumes,
5988                            elevation, zone=None, new_origin=None,
5989                            points_georeference=None, verbose=False):
5990        """
        new_origin - a georeference that the points can be set to. (Maybe
5992        do this before calling this function.)
5993
5994        points_utm - currently a list or array of the points in UTM.
5995        points_georeference - the georeference of the points_utm
5996
5997        How about passing new_origin and current_origin.
        If you get both, do a conversion from the old to the new.
5999
6000        If you only get new_origin, the points are absolute,
6001        convert to relative
6002
6003        if you only get the current_origin the points are relative, store
6004        as relative.
6005
6006        if you get no georefs create a new georef based on the minimums of
6007        points_utm.  (Another option would be to default to absolute)
6008
6009        Yes, and this is done in another part of the code.
6010        Probably geospatial.
6011
6012        If you don't supply either geo_refs, then supply a zone. If not
6013        the default zone will be used.
6014
6015        precon:
6016            header has been called.
6017        """
6018
6019        number_of_points = len(points_utm)
6020        volumes = num.array(volumes)
6021        points_utm = num.array(points_utm)
6022
6023        # given the two geo_refs and the points, do the stuff
6024        # described in the method header
6025        # if this is needed else where, pull out as a function
6026        points_georeference = ensure_geo_reference(points_georeference)
6027        new_origin = ensure_geo_reference(new_origin)
6028        if new_origin is None and points_georeference is not None:
6029            points = points_utm
6030            geo_ref = points_georeference
6031        else:
6032            if new_origin is None:
6033                new_origin = Geo_reference(zone, min(points_utm[:,0]),
6034                                                 min(points_utm[:,1]))
6035            points = new_origin.change_points_geo_ref(points_utm,
6036                                                      points_georeference)
6037            geo_ref = new_origin
6038
6039        # At this stage I need a georef and points
6040        # the points are relative to the georef
6041        geo_ref.write_NetCDF(outfile)
6042
6043        # This will put the geo ref in the middle
6044        #geo_ref = Geo_reference(refzone,(max(x)+min(x))/2.0,(max(x)+min(y))/2.)
6045
6046        x =  points[:,0]
6047        y =  points[:,1]
6048        z = outfile.variables['z'][:]
6049
6050        if verbose:
6051            print '------------------------------------------------'
6052            print 'More Statistics:'
            print '  Extent (lat/lon):'
6054            print '    x in [%f, %f], len(lat) == %d' \
6055                  % (min(x), max(x), len(x))
6056            print '    y in [%f, %f], len(lon) == %d' \
6057                  % (min(y), max(y), len(y))
6058            print '    z in [%f, %f], len(z) == %d' \
6059                  % (min(elevation), max(elevation), len(elevation))
6060            print 'geo_ref: ',geo_ref
6061            print '------------------------------------------------'
6062
6063        #z = resize(bath_grid, outfile.variables['z'][:].shape)
6064        outfile.variables['x'][:] = points[:,0] #- geo_ref.get_xllcorner()
6065        outfile.variables['y'][:] = points[:,1] #- geo_ref.get_yllcorner()
6066        outfile.variables['z'][:] = elevation
6067        outfile.variables['elevation'][:] = elevation  #FIXME HACK
6068        outfile.variables['volumes'][:] = volumes.astype(num.int32) #On Opteron 64
6069
6070        q = 'elevation'
6071        # This updates the _range values
6072        outfile.variables[q + Write_sww.RANGE][0] = num.min(elevation)
6073        outfile.variables[q + Write_sww.RANGE][1] = num.max(elevation)
6074
6075
    ##
    # @brief Write the quantity data to the underlying file.
    # @param outfile Handle to open underlying file.
    # @param sww_precision Format of quantity data to write (default Float32).
    # @param slice_index Time index to write the data into, if the time
    #                    array has already been built.
    # @param time Time value; appending it extends the time dimension.
    # @param verbose True if this function is to be verbose.
    # @param **quant Extra keyword args holding the sww quantities.
    def store_quantities(self, outfile, sww_precision=num.float32,
                         slice_index=None, time=None,
                         verbose=False, **quant):
        """
        Write the quantity info.

        **quant holds extra keyword arguments passed in. These must be
        the sww quantities, currently: stage, xmomentum and ymomentum.

        If the time array has already been built, use slice_index to
        specify which index to write to.

        Otherwise, pass time to extend the time dimension.

        Maybe make this general, but the viewer assumes these quantities,
        so maybe we don't want it general - unless the viewer is general.

        precondition:
            The triangulation and header have already been written.
        """
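
        # A minimal usage sketch (hypothetical names: `fid` is the open
        # NetCDF file, `stage`, `xmom` and `ymom` are numeric arrays of
        # length number_of_points):
        #
        #     writer.store_quantities(fid, time=t,
        #                             stage=stage,
        #                             xmomentum=xmom,
        #                             ymomentum=ymom)
        #
        # Passing time appends a new slice to the time dimension;
        # passing slice_index instead writes to that existing index.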

        if time is not None:
            file_time = outfile.variables['time']
            slice_index = len(file_time)
            file_time[slice_index] = time

        # Write the conserved quantities from Domain.
        # Typically stage, xmomentum, ymomentum;
        # other quantities will be ignored, silently.
        # Also write the ranges: stage_range,
        # xmomentum_range and ymomentum_range.
        for q in Write_sww.sww_quantities:
            if not quant.has_key(q):
                msg = 'SWW file can not write quantity %s' % q
                raise NewQuantity, msg
            else:
                q_values = quant[q]
                outfile.variables[q][slice_index] = \
                                q_values.astype(sww_precision)

                # This updates the _range values
                q_range = outfile.variables[q + Write_sww.RANGE][:]
                q_values_min = num.min(q_values)
                if q_values_min < q_range[0]:
                    outfile.variables[q + Write_sww.RANGE][0] = q_values_min
                q_values_max = num.max(q_values)
                if q_values_max > q_range[1]:
                    outfile.variables[q + Write_sww.RANGE][1] = q_values_max

    ##
    # @brief Print the quantities in the underlying file.
    # @param outfile Handle to open underlying file.
    def verbose_quantities(self, outfile):
        print '------------------------------------------------'
        print 'More Statistics:'
        for q in Write_sww.sww_quantities:
            print '    %s in [%f, %f]' % (q,
                                        outfile.variables[q+Write_sww.RANGE][0],
                                        outfile.variables[q+Write_sww.RANGE][1])
        print '------------------------------------------------'


##
# @brief Obsolete?
# @param outfile
# @param has
# @param uas
# @param vas
# @param elevation
# @param mean_stage
# @param zscale
# @param verbose
def obsolete_write_sww_time_slices(outfile, has, uas, vas, elevation,
                                   mean_stage=0, zscale=1,
                                   verbose=False):
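    # Each wave height ha is converted to a stage w = zscale*ha + mean_stage,
    # the depth is h = w - elevation, and the stored momenta are velocity
    # times depth: ua*h and -va*h.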
    # Time stepping
    stage = outfile.variables['stage']
    xmomentum = outfile.variables['xmomentum']
    ymomentum = outfile.variables['ymomentum']

    n = len(has)
    j = 0
    for ha, ua, va in map(None, has, uas, vas):
        if verbose and j % ((n+10)/10) == 0: print '  Doing %d of %d' % (j, n)
        w = zscale*ha + mean_stage
        stage[j] = w
        h = w - elevation
        xmomentum[j] = ua * h
        ymomentum[j] = -1 * va * h  # -1 since in mux files south is positive.
        j += 1


##
# @brief Convert a set of URS files to a text file.
# @param basename_in Stem path to the 3 URS files.
# @param location_index ??
def urs2txt(basename_in, location_index=None):
    """
    Not finished or tested.
    """
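
    # A minimal usage sketch (hypothetical file stem; assumes the three
    # mux files named with the WAVEHEIGHT_MUX_LABEL, EAST_VELOCITY_LABEL
    # and NORTH_VELOCITY_LABEL suffixes exist):
    #
    #     urs2txt('project_data', location_index=0)
    #
    # This writes 'project_data.txt' containing the UTM zone, each
    # gauge's position and depth and, for gauge 0, the HA/UA/VA series.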

    files_in = [basename_in + WAVEHEIGHT_MUX_LABEL,
                basename_in + EAST_VELOCITY_LABEL,
                basename_in + NORTH_VELOCITY_LABEL]
    quantities = ['HA', 'UA', 'VA']

    d = ","

    # Instantiate urs_points for each of the three mux files.
    mux = {}
    for quantity, filename in map(None, quantities, files_in):
        mux[quantity] = Urs_points(filename)

    # Could check that the depth is the same. (hashing)

    # Handle to a mux file to do depth stuff
    a_mux = mux[quantities[0]]

    # Convert to UTM
    latitudes = a_mux.lonlatdep[:,1]
    longitudes = a_mux.lonlatdep[:,0]
    points_utm, zone = \
        convert_from_latlon_to_utm(latitudes=latitudes, longitudes=longitudes)
    depths = a_mux.lonlatdep[:,2]

    # Open the output text file, start writing.
    fid = open(basename_in + '.txt', 'w')

    fid.write("zone: " + str(zone) + "\n")

    if location_index is not None:
        # Title
        li = location_index
        fid.write('location_index' + d + 'lat' + d + 'long' + d +
                  'Easting' + d + 'Northing' + '\n')
        fid.write(str(li) + d + str(latitudes[li]) + d +
                  str(longitudes[li]) + d + str(points_utm[li][0]) + d +
                  str(points_utm[li][1]) + '\n')

    # The non-time-dependent stuff
    # Title
    fid.write('location_index' + d + 'lat' + d + 'long' + d +
              'Easting' + d + 'Northing' + d + 'depth m' + '\n')
    i = 0
    for depth, point_utm, lat, long in map(None, depths, points_utm,
                                           latitudes, longitudes):

        fid.write(str(i) + d + str(lat) + d + str(long) + d +
                  str(point_utm[0]) + d + str(point_utm[1]) + d +
                  str(depth) + '\n')
        i += 1

    # Time dependent
    if location_index is not None:
        time_step = a_mux.time_step
        i = 0
        # Title
        fid.write('time' + d + 'HA depth m' + d + 'UA momentum East x m/sec' +
                  d + 'VA momentum North y m/sec' + '\n')
        for HA, UA, VA in map(None, mux['HA'], mux['UA'], mux['VA']):
            fid.write(str(i*time_step) + d + str(HA[location_index]) + d +
                      str(UA[location_index]) + d +
                      str(VA[location_index]) + '\n')
            i += 1


##
# @brief A class to write STS files.
class Write_sts:
    sts_quantities = ['stage', 'xmomentum', 'ymomentum']
    RANGE = '_range'
    EXTREMA = ':extrema'
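
    # Typical write sequence (a sketch; `fid` is a hypothetical NetCDF
    # file opened for writing; see store_header and store_points below):
    #
    #     sts_writer = Write_sts()
    #     sts_writer.store_header(fid, times, number_of_points)
    #     sts_writer.store_points(fid, points_utm, elevation)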

    ##
    # @brief Instantiate this STS writer class.
    def __init__(self):
        pass

    ##
    # @brief Write a header to the underlying data file.
    # @param outfile Handle to open file to write.
    # @param times A list of the time slice times *or* a start time.
    # @param number_of_points The number of URS gauge sites.
    # @param description Description string to write into the STS file.
    # @param sts_precision Format of data to write (netcdf constant ONLY).
    # @param verbose True if this function is to be verbose.
    # @note If 'times' is a list, the info will be made relative.
    def store_header(self,
                     outfile,
                     times,
                     number_of_points,
                     description='Converted from URS mux2 format',
                     sts_precision=netcdf_float32,
                     verbose=False):
        """
        outfile - handle to the open NetCDF file to be written
        times - a list of the time slice times OR a start time
        Note: if a list is given, the info will be made relative.
        number_of_points - the number of URS gauge sites
        """
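
        # A minimal usage sketch (hypothetical names; `fid` is a NetCDF
        # file opened for writing and `mux_times` a list of time slice
        # times in seconds):
        #
        #     writer = Write_sts()
        #     writer.store_header(fid, times=mux_times,
        #                         number_of_points=number_of_gauges)
        #
        # If times is a list or array, its first entry becomes
        # outfile.starttime and the stored times are made relative to it.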

        outfile.institution = 'Geoscience Australia'
        outfile.description = description

        try:
            revision_number = get_revision_number()
        except:
            revision_number = None

        # Allow None to be stored as a string
        outfile.revision_number = str(revision_number)

        # Start time in seconds since the epoch (midnight 1/1/1970).
        # The isinstance() test below distinguishes a list or numeric
        # array of time slices from a single start time.
        if isinstance(times, (list, num.ndarray)):
            number_of_times = len(times)
            times = ensure_numeric(times)
            if number_of_times == 0:
                starttime = 0
            else:
                starttime = times[0]
                times = times - starttime  # Store relative times
        else:
            number_of_times = 0
            starttime = times

        outfile.starttime = starttime

        # Dimension definitions
        outfile.createDimension('number_of_points', number_of_points)
        outfile.createDimension('number_of_timesteps', number_of_times)
        outfile.createDimension('numbers_in_range', 2)

        # Variable definitions
        outfile.createVariable('permutation', netcdf_int, ('number_of_points',))
        outfile.createVariable('x', sts_precision, ('number_of_points',))
        outfile.createVariable('y', sts_precision, ('number_of_points',))
        outfile.createVariable('elevation', sts_precision, ('number_of_points',))

        q = 'elevation'
        outfile.createVariable(q + Write_sts.RANGE, sts_precision,
                               ('numbers_in_range',))

        # Initialise ranges with small and large sentinels.
        # If this was in pure Python we could have used None sensibly.
        outfile.variables[q + Write_sts.RANGE][0] = max_float  # Min
        outfile.variables[q + Write_sts.RANGE][1] = -max_float # Max

        # Doing sts_precision instead of Float gives cast errors.
        outfile.createVariable('time', netcdf_float, ('number_of_timesteps',))

        for q in Write_sts.sts_quantities:
            outfile.createVariable(q, sts_precision, ('number_of_timesteps',
                                                      'number_of_points'))
            outfile.createVariable(q + Write_sts.RANGE, sts_precision,
                                   ('numbers_in_range',))
            # Initialise ranges with small and large sentinels.
            # If this was in pure Python we could have used None sensibly.
            outfile.variables[q + Write_sts.RANGE][0] = max_float  # Min
            outfile.variables[q + Write_sts.RANGE][1] = -max_float # Max

        if isinstance(times, (list, num.ndarray)):
            outfile.variables['time'][:] = times    # Store relative times

        if verbose:
            print '------------------------------------------------'
            print 'Statistics:'
            print '    t in [%f, %f], len(t) == %d' \
                  % (num.min(times), num.max(times), len(times.flat))

    ##
    # @brief Write the points and elevation to the underlying data file.
    # @param outfile Handle to open file to write.
    # @param points_utm List or array of the points in UTM.
    # @param elevation Elevation at each point.
    # @param zone UTM zone of the points.
    # @param new_origin A georeference the points can be made relative to.
    # @param points_georeference The georeference of points_utm.
    # @param verbose True if this function is to be verbose.
    def store_points(self,
                     outfile,
                     points_utm,
                     elevation, zone=None, new_origin=None,
                     points_georeference=None, verbose=False):

        """
        points_utm - currently a list or array of the points in UTM.
        points_georeference - the georeference of points_utm.

        How about passing new_origin and current_origin.
        If you get both, convert from the old georeference to the new one.

        If you only get new_origin, the points are absolute;
        convert them to relative.

        If you only get the current_origin, the points are relative; store