source: anuga_core/source/anuga/shallow_water/data_manager.py @ 6114

1"""datamanager.py - input output for AnuGA
2
3
4This module takes care of reading and writing datafiles such as topograhies,
5model output, etc
6
7
8Formats used within AnuGA:
9
10.sww: Netcdf format for storing model output f(t,x,y)
11.tms: Netcdf format for storing time series f(t)
12
13.csv: ASCII format for storing arbitrary points and associated attributes
14.pts: NetCDF format for storing arbitrary points and associated attributes
15
16.asc: ASCII format of regular DEMs as output from ArcView
17.prj: Associated ArcView file giving more meta data for asc format
18.ers: ERMapper header format of regular DEMs for ArcView
19
20.dem: NetCDF representation of regular DEM data
21
22.tsh: ASCII format for storing meshes and associated boundary and region info
23.msh: NetCDF format for storing meshes and associated boundary and region info
24
25.nc: Native ferret NetCDF format
26.geo: Houdinis ascii geometry format (?)
27
28
29A typical dataflow can be described as follows
30
31Manually created files:
32ASC, PRJ:     Digital elevation models (gridded)
33TSH:          Triangular meshes (e.g. created from anuga.pmesh)
34NC            Model outputs for use as boundary conditions (e.g from MOST)
35
36
37AUTOMATICALLY CREATED FILES:
38
39ASC, PRJ  ->  DEM  ->  PTS: Conversion of DEM's to native pts file
40
41NC -> SWW: Conversion of MOST bundary files to boundary sww
42
43PTS + TSH -> TSH with elevation: Least squares fit
44
45TSH -> SWW:  Conversion of TSH to sww viewable using Swollen
46
47TSH + Boundary SWW -> SWW: Simluation using abstract_2d_finite_volumes
48
49"""
50
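# A minimal sketch of the dataflow described above, using two of the
# conversion functions defined later in this module (filenames are
# hypothetical):
#
#     >>> dem2pts('topography')             # DEM -> PTS
#     >>> sww2obj('model_output', 400)      # SWW -> OBJ for viewing
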
# This file was reverted from changeset:5484 to changeset:5470 on 10th July
# by Ole.

import os, sys
import csv
import exceptions
import string
import shutil
from struct import unpack
import array as p_array
from os import sep, path, remove, mkdir, access, F_OK, W_OK, getcwd

from Numeric import concatenate, array, Float, Int, Int32, resize, \
     sometrue, searchsorted, zeros, allclose, around, reshape, \
     transpose, sort, NewAxis, ArrayType, compress, take, arange, \
     argmax, alltrue, shape, Float32, size

from Scientific.IO.NetCDF import NetCDFFile
from os.path import exists, basename, join
from os import getcwd

from anuga.coordinate_transforms.redfearn import redfearn, \
     convert_from_latlon_to_utm
from anuga.coordinate_transforms.geo_reference import Geo_reference, \
     write_NetCDF_georeference, ensure_geo_reference
from anuga.geospatial_data.geospatial_data import Geospatial_data,\
     ensure_absolute
from anuga.config import minimum_storable_height as \
     default_minimum_storable_height
from anuga.config import netcdf_mode_r, netcdf_mode_w, netcdf_mode_a
from anuga.config import max_float
from anuga.utilities.numerical_tools import ensure_numeric, mean
from anuga.caching.caching import myhash
from anuga.utilities.anuga_exceptions import ANUGAError
from anuga.shallow_water import Domain
from anuga.abstract_2d_finite_volumes.pmesh2domain import \
     pmesh_to_domain_instance
from anuga.abstract_2d_finite_volumes.util import get_revision_number, \
     remove_lone_verts, sww2timeseries, get_centroid_values

from anuga.abstract_2d_finite_volumes.neighbour_mesh import segment_midpoints
from anuga.load_mesh.loadASCII import export_mesh_file
from anuga.utilities.polygon import intersection


######
# Exception classes
######

class TitleValueError(exceptions.Exception): pass
class DataMissingValuesError(exceptions.Exception): pass
class DataFileNotOpenError(exceptions.Exception): pass
class DataTimeError(exceptions.Exception): pass
class DataDomainError(exceptions.Exception): pass
class NewQuantity(exceptions.Exception): pass


######
# formula mappings
######

quantity_formula = {'momentum':'(xmomentum**2 + ymomentum**2)**0.5',
                    'depth':'stage-elevation',
                    'speed': \
 '(xmomentum**2 + ymomentum**2)**0.5/(stage-elevation+1.e-6/(stage-elevation))'}

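# Example (sketch): these formula strings are meant to be eval'ed with the
# named quantity arrays in scope, e.g.
#
#     >>> stage = array([1.0, 2.0]); elevation = array([0.5, 0.5])
#     >>> eval(quantity_formula['depth'])
#     array([ 0.5,  1.5])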

##
# @brief Convert a possible filename into a standard form.
# @param s Filename to process.
# @return The new filename string.
def make_filename(s):
    """Transform argument string into a suitable filename
    """

    s = s.strip()
    s = s.replace(' ', '_')
    s = s.replace('(', '')
    s = s.replace(')', '')
    s = s.replace('__', '_')

    return s

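# Example:
#     >>> make_filename(' Model run (final) ')
#     'Model_run_final'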

##
# @brief Check that a specified filesystem directory path exists.
# @param path The directory path to check.
# @param verbose True if this function is to be verbose.
# @note If directory path doesn't exist, it will be created.
def check_dir(path, verbose=None):
    """Check that specified path exists.
    If path does not exist it will be created if possible

    USAGE:
       check_dir(path, verbose)

    ARGUMENTS:
        path -- Directory
        verbose -- Flag verbose output (default: None)

    RETURN VALUE:
        Verified path including trailing separator
    """

    import os.path

    if sys.platform in ['nt', 'dos', 'win32', 'what else?']:
        unix = 0
    else:
        unix = 1

    # add terminal separator, if it's not already there
    if path[-1] != os.sep:
        path = path + os.sep

    # expand ~ or ~username in path
    path = os.path.expanduser(path)

    # create directory if required
    if not (os.access(path, os.R_OK | os.W_OK) or path == ''):
        try:
            exitcode = os.mkdir(path)

            # Change access rights if possible
            if unix:
                exitcode = os.system('chmod 775 ' + path)
            else:
                pass  # FIXME: What about access rights under Windows?

            if verbose: print 'MESSAGE: Directory', path, 'created.'
        except:
            print 'WARNING: Directory', path, 'could not be created.'
            if unix:
                path = '/tmp/'
            else:
                path = 'C:' + os.sep

            print "Using directory '%s' instead" % path

    return path

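# Example (sketch; creates the directory if it is missing):
#     >>> outdir = check_dir('~/anuga_output')
#     >>> outdir.endswith(os.sep)
#     True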

##
# @brief Delete directory and all sub-directories.
# @param path Path to the directory to delete.
def del_dir(path):
    """Recursively delete directory path and all its contents
    """

    if os.path.isdir(path):
        for file in os.listdir(path):
            X = os.path.join(path, file)

            if os.path.isdir(X) and not os.path.islink(X):
                del_dir(X)
            else:
                try:
                    os.remove(X)
                except:
                    print "Could not remove file %s" % X

        os.rmdir(path)


##
# @brief Remove an object at 'path' using the given removal function.
# @param path Path of the object to remove.
# @param func Function used to remove the object (e.g. os.remove or os.rmdir).
# @param verbose True if this function is to be verbose.
# @note ANOTHER OPTION, IF NEEDED IN THE FUTURE, Nick B 7/2007
def rmgeneric(path, func, verbose=False):
    ERROR_STR = """Error removing %(path)s, %(error)s """

    try:
        func(path)
        if verbose: print 'Removed ', path
    except OSError, (errno, strerror):
        print ERROR_STR % {'path': path, 'error': strerror}


##
# @brief Remove directory and all sub-directories.
# @param path Filesystem path to directory to remove.
# @param verbose True if this function is to be verbose.
def removeall(path, verbose=False):
    if not os.path.isdir(path):
        return

    for x in os.listdir(path):
        fullpath = os.path.join(path, x)
        if os.path.isfile(fullpath):
            f = os.remove
            rmgeneric(fullpath, f, verbose)
        elif os.path.isdir(fullpath):
            removeall(fullpath, verbose)
            f = os.rmdir
            rmgeneric(fullpath, f, verbose)

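# Example (sketch; destructive: removes everything below the given paths):
#     >>> removeall('/tmp/anuga_scratch', verbose=True)
#     >>> del_dir('/tmp/old_output')    # also removes the directory itself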

##
# @brief Create a standard filename.
# @param datadir Directory where file is to be created.
# @param filename Filename 'stem'.
# @param format Format of the file, becomes filename extension.
# @param size Size of file, becomes part of filename.
# @param time Time (float), becomes part of filename.
# @return The complete filename path, including directory.
# @note The containing directory is created, if necessary.
def create_filename(datadir, filename, format, size=None, time=None):
    FN = check_dir(datadir) + filename

    if size is not None:
        FN += '_size%d' % size

    if time is not None:
        FN += '_time%.2f' % time

    FN += '.' + format

    return FN

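# Example (note that check_dir may create the directory 'output'):
#     >>> create_filename('output', 'runup', 'sww', size=400, time=2.5)
#     'output/runup_size400_time2.50.sww'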

##
# @brief Get all files with a standard name and a given set of attributes.
# @param datadir Directory files must be in.
# @param filename Filename stem.
# @param format Filename extension.
# @param size Filename size.
# @return A list of filenames (including directory) that match the attributes.
def get_files(datadir, filename, format, size):
    """Get all file (names) with given name, size and format
    """

    import glob

    dir = check_dir(datadir)
    pattern = dir + filename + '_size%d*.%s' % (size, format)

    return glob.glob(pattern)

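# Example (sketch; matches files produced by create_filename above):
#     >>> get_files('output', 'runup', 'sww', 400)
#     ['output/runup_size400_time2.50.sww']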

##
# @brief Generic class for storing output to e.g. visualisation or checkpointing
class Data_format:
    """Generic interface to data formats
    """

    ##
    # @brief Instantiate this instance.
    # @param domain
    # @param extension
    # @param mode The mode of the underlying file.
    def __init__(self, domain, extension, mode=netcdf_mode_w):
        assert mode[0] in ['r', 'w', 'a'], \
               "Mode %s must be either:\n" % mode + \
               "   'w' (write)\n" + \
               "   'r' (read)\n" + \
               "   'a' (append)"

        # Create filename
        self.filename = create_filename(domain.get_datadir(),
                                        domain.get_name(), extension)

        self.timestep = 0
        self.domain = domain

        # Exclude ghosts in case this is a parallel domain
        self.number_of_nodes = domain.number_of_full_nodes
        self.number_of_volumes = domain.number_of_full_triangles
        #self.number_of_volumes = len(domain)

        # FIXME: Should we have a general set_precision function?


##
# @brief Class for storing output to e.g. visualisation
class Data_format_sww(Data_format):
    """Interface to native NetCDF format (.sww) for storing model output

    There are two kinds of data

    1: Constant data: Vertex coordinates and field values. Stored once
    2: Variable data: Conserved quantities. Stored once per timestep.

    All data is assumed to reside at vertex locations.
    """

    ##
    # @brief Instantiate this instance.
    # @param domain ??
    # @param mode Mode of the underlying data file.
    # @param max_size ??
    # @param recursion ??
    # @note Prepare the underlying data file if mode starts with 'w'.
    def __init__(self, domain, mode=netcdf_mode_w, max_size=2000000000, recursion=False):
        from Scientific.IO.NetCDF import NetCDFFile
        from Numeric import Int, Float, Float32

        self.precision = Float32 # Use single precision for quantities
        self.recursion = recursion
        self.mode = mode
        if hasattr(domain, 'max_size'):
            self.max_size = domain.max_size # file size max is 2Gig
        else:
            self.max_size = max_size
        if hasattr(domain, 'minimum_storable_height'):
            self.minimum_storable_height = domain.minimum_storable_height
        else:
            self.minimum_storable_height = default_minimum_storable_height

        # call owning constructor
        Data_format.__init__(self, domain, 'sww', mode)

        # NetCDF file definition
        fid = NetCDFFile(self.filename, mode)
        if mode[0] == 'w':
            description = 'Output from anuga.abstract_2d_finite_volumes ' \
                          'suitable for plotting'
            self.writer = Write_sww()
            self.writer.store_header(fid,
                                     domain.starttime,
                                     self.number_of_volumes,
                                     self.domain.number_of_full_nodes,
                                     description=description,
                                     smoothing=domain.smooth,
                                     order=domain.default_order,
                                     sww_precision=self.precision)

            # Extra optional information
            if hasattr(domain, 'texture'):
                fid.texture = domain.texture

            if domain.quantities_to_be_monitored is not None:
                fid.createDimension('singleton', 1)
                fid.createDimension('two', 2)

                poly = domain.monitor_polygon
                if poly is not None:
                    N = len(poly)
                    fid.createDimension('polygon_length', N)
                    fid.createVariable('extrema.polygon',
                                       self.precision,
                                       ('polygon_length', 'two'))
                    fid.variables['extrema.polygon'][:] = poly

                interval = domain.monitor_time_interval
                if interval is not None:
                    fid.createVariable('extrema.time_interval',
                                       self.precision,
                                       ('two',))
                    fid.variables['extrema.time_interval'][:] = interval

                for q in domain.quantities_to_be_monitored:
                    fid.createVariable(q + '.extrema', self.precision,
                                       ('numbers_in_range',))
                    fid.createVariable(q + '.min_location', self.precision,
                                       ('numbers_in_range',))
                    fid.createVariable(q + '.max_location', self.precision,
                                       ('numbers_in_range',))
                    fid.createVariable(q + '.min_time', self.precision,
                                       ('singleton',))
                    fid.createVariable(q + '.max_time', self.precision,
                                       ('singleton',))

        fid.close()

    ##
    # @brief Store connectivity data into the underlying data file.
    def store_connectivity(self):
        """Specialisation of store_connectivity for NetCDF format

        Writes x,y,z coordinates of triangles constituting
        the bed elevation.
        """

        from Scientific.IO.NetCDF import NetCDFFile
        from Numeric import concatenate, Int

        domain = self.domain

        # append to the NetCDF file
        fid = NetCDFFile(self.filename, netcdf_mode_a)

#        # Get the variables
#        x = fid.variables['x']
#        y = fid.variables['y']
#        z = fid.variables['elevation']
#        volumes = fid.variables['volumes']

        # Get X, Y and bed elevation Z
        Q = domain.quantities['elevation']
        X,Y,Z,V = Q.get_vertex_values(xy=True, precision=self.precision)

        # store the connectivity data
        points = concatenate( (X[:,NewAxis],Y[:,NewAxis]), axis=1 )
        self.writer.store_triangulation(fid,
                                        points,
#                                        V.astype(volumes.typecode()),
                                        V.astype(Float32),
                                        Z,
                                        points_georeference=\
                                            domain.geo_reference)

        fid.close()

    ##
    # @brief Store time and named quantities to the underlying data file.
    # @param names The names of the quantities to store.
    # @note If 'names' not supplied, store a standard set of quantities.
    def store_timestep(self, names=None):
        """Store time and named quantities to file
        """

        from Scientific.IO.NetCDF import NetCDFFile
        import types
        from time import sleep
        from os import stat
        from Numeric import choose

        if names is None:
            # Standard shallow water wave equation quantities in ANUGA
            names = ['stage', 'xmomentum', 'ymomentum']

        # Get NetCDF
        retries = 0
        file_open = False
        while not file_open and retries < 10:
            try:
                fid = NetCDFFile(self.filename, netcdf_mode_a) # Open existing file
            except IOError:
                # This could happen if someone was reading the file.
                # In that case, wait a while and try again
                msg = 'Warning (store_timestep): File %s could not be opened' \
                      % self.filename
                msg += ' - trying step %s again' % self.domain.time
                print msg
                retries += 1
                sleep(1)
            else:
                file_open = True

        if not file_open:
            msg = 'File %s could not be opened for append' % self.filename
            raise DataFileNotOpenError, msg

        # Check to see if the file is already too big:
        time = fid.variables['time']
        i = len(time) + 1
        file_size = stat(self.filename)[6]
        file_size_increase = file_size / i
        if file_size + file_size_increase > self.max_size * 2**self.recursion:
            # In order to get the file name and start time correct,
            # I change the domain.filename and domain.starttime.
            # This is the only way to do this without changing
            # other modules (I think).

            # Write a filename addon that won't break Swollen's reader
            # (10.sww is bad)
            filename_ext = '_time_%s' % self.domain.time
            filename_ext = filename_ext.replace('.', '_')

            # Remember the old filename, then give domain a
            # name with the extension
            old_domain_filename = self.domain.get_name()
            if not self.recursion:
                self.domain.set_name(old_domain_filename + filename_ext)

            # Change the domain starttime to the current time
            old_domain_starttime = self.domain.starttime
            self.domain.starttime = self.domain.time

            # Build a new data_structure.
            next_data_structure = Data_format_sww(self.domain, mode=self.mode,
                                                  max_size=self.max_size,
                                                  recursion=self.recursion+1)
            if not self.recursion:
                print '    file_size = %s' % file_size
                print '    saving file to %s' % next_data_structure.filename

            # set up the new data_structure
            self.domain.writer = next_data_structure

            # FIXME - could be cleaner to use domain.store_timestep etc.
            next_data_structure.store_connectivity()
            next_data_structure.store_timestep(names)
            fid.sync()
            fid.close()

            # restore the old starttime and filename
            self.domain.starttime = old_domain_starttime
            self.domain.set_name(old_domain_filename)
        else:
            self.recursion = False
            domain = self.domain

            # Get the variables
            time = fid.variables['time']
            stage = fid.variables['stage']
            xmomentum = fid.variables['xmomentum']
            ymomentum = fid.variables['ymomentum']
            i = len(time)
            if type(names) not in [types.ListType, types.TupleType]:
                names = [names]

            if 'stage' in names \
               and 'xmomentum' in names \
               and 'ymomentum' in names:
                # Get stage, elevation, depth and select only those
                # values where minimum_storable_height is exceeded
                Q = domain.quantities['stage']
                A, _ = Q.get_vertex_values(xy=False, precision=self.precision)
                z = fid.variables['elevation']

                storable_indices = (A-z[:] >= self.minimum_storable_height)
                stage = choose(storable_indices, (z[:], A))

                # Define a zero vector of same size and type as A
                # for use with momenta
                null = zeros(size(A), A.typecode())

                # Get xmomentum where depth exceeds minimum_storable_height
                Q = domain.quantities['xmomentum']
                xmom, _ = Q.get_vertex_values(xy=False,
                                              precision=self.precision)
                xmomentum = choose(storable_indices, (null, xmom))


                # Get ymomentum where depth exceeds minimum_storable_height
                Q = domain.quantities['ymomentum']
                ymom, _ = Q.get_vertex_values(xy=False,
                                              precision=self.precision)
                ymomentum = choose(storable_indices, (null, ymom))

                # Write quantities to underlying data file
                self.writer.store_quantities(fid,
                                             time=self.domain.time,
                                             sww_precision=self.precision,
                                             stage=stage,
                                             xmomentum=xmomentum,
                                             ymomentum=ymomentum)
            else:
                msg = 'Quantities stored must be: stage, xmomentum, ymomentum, '
                msg += 'but I got: ' + str(names)
                raise Exception, msg

            # Update extrema if requested
            domain = self.domain
            if domain.quantities_to_be_monitored is not None:
                for q, info in domain.quantities_to_be_monitored.items():
                    if info['min'] is not None:
                        fid.variables[q + '.extrema'][0] = info['min']
                        fid.variables[q + '.min_location'][:] = \
                                        info['min_location']
                        fid.variables[q + '.min_time'][0] = info['min_time']

                    if info['max'] is not None:
                        fid.variables[q + '.extrema'][1] = info['max']
                        fid.variables[q + '.max_location'][:] = \
                                        info['max_location']
                        fid.variables[q + '.max_time'][0] = info['max_time']

            # Flush and close
            fid.sync()
            fid.close()

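# A minimal sketch of how this class is typically driven (assuming an
# existing anuga Domain instance; in practice domain.store_timestep()
# goes through get_dataobject defined further below):
#
#     >>> sww = Data_format_sww(domain)     # writes the .sww header
#     >>> sww.store_connectivity()          # triangulation and elevation
#     >>> sww.store_timestep(['stage', 'xmomentum', 'ymomentum'])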

##
# @brief Class for handling checkpoint data
class Data_format_cpt(Data_format):
    """Interface to native NetCDF format (.cpt)
    """

    ##
    # @brief Initialize this instantiation.
    # @param domain ??
    # @param mode Mode of underlying data file (default WRITE).
    def __init__(self, domain, mode=netcdf_mode_w):
        from Scientific.IO.NetCDF import NetCDFFile
        from Numeric import Int, Float

        self.precision = Float # Use full precision

        Data_format.__init__(self, domain, 'cpt', mode)

        # NetCDF file definition
        fid = NetCDFFile(self.filename, mode)
        if mode[0] == 'w':
            # Create new file
            fid.institution = 'Geoscience Australia'
            fid.description = 'Checkpoint data'
            #fid.smooth = domain.smooth
            fid.order = domain.default_order

            # dimension definitions
            fid.createDimension('number_of_volumes', self.number_of_volumes)
            fid.createDimension('number_of_vertices', 3)

            # Store info at all vertices (no smoothing)
            fid.createDimension('number_of_points', 3*self.number_of_volumes)
            fid.createDimension('number_of_timesteps', None) # extensible

            # variable definitions

            # Mesh
            fid.createVariable('x', self.precision, ('number_of_points',))
            fid.createVariable('y', self.precision, ('number_of_points',))


            fid.createVariable('volumes', Int, ('number_of_volumes',
                                                'number_of_vertices'))

            fid.createVariable('time', self.precision, ('number_of_timesteps',))

            # Allocate space for all quantities
            for name in domain.quantities.keys():
                fid.createVariable(name, self.precision,
                                   ('number_of_timesteps',
                                    'number_of_points'))

        fid.close()

    ##
    # @brief Store connectivity data to underlying data file.
    def store_checkpoint(self):
        """Write x,y coordinates and connectivity (volumes) of the
        triangles constituting the mesh, plus the bed elevation.
        """

        from Scientific.IO.NetCDF import NetCDFFile
        from Numeric import concatenate

        domain = self.domain

        # Get NetCDF
        fid = NetCDFFile(self.filename, netcdf_mode_a)

        # Get the variables
        x = fid.variables['x']
        y = fid.variables['y']
        z = fid.variables['elevation'] # assumes 'elevation' is a stored quantity

        volumes = fid.variables['volumes']

        # Get X, Y and bed elevation Z
        Q = domain.quantities['elevation']
        X,Y,Z,V = Q.get_vertex_values(xy=True, precision=self.precision)

        x[:] = X.astype(self.precision)
        y[:] = Y.astype(self.precision)
        z[:] = Z.astype(self.precision)

        volumes[:] = V

        fid.close()

    ##
    # @brief Store time and named quantities to underlying data file.
    # @param name
    def store_timestep(self, name):
        """Store time and named quantity to file
        """

        from Scientific.IO.NetCDF import NetCDFFile
        from time import sleep

        # Get NetCDF
        retries = 0
        file_open = False
        while not file_open and retries < 10:
            try:
                fid = NetCDFFile(self.filename, netcdf_mode_a)
            except IOError:
                # This could happen if someone was reading the file.
                # In that case, wait a while and try again
                msg = 'Warning (store_timestep): File %s could not be opened' \
                      ' - trying again' % self.filename
                print msg
                retries += 1
                sleep(1)
            else:
                file_open = True

        if not file_open:
            msg = 'File %s could not be opened for append' % self.filename
            raise DataFileNotOpenError, msg

        domain = self.domain

        # Get the variables
        time = fid.variables['time']
        var = fid.variables[name] # variable holding the named quantity
        i = len(time)

        # Store time
        time[i] = self.domain.time

        # Get quantity
        Q = domain.quantities[name]
        A,V = Q.get_vertex_values(xy=False, precision=self.precision)

        var[i,:] = A.astype(self.precision)

        # Flush and close
        fid.sync()
        fid.close()


##
# @brief Class for National Exposure Database storage (NEXIS).

LAT_TITLE = 'LATITUDE'
LONG_TITLE = 'LONGITUDE'
X_TITLE = 'x'
Y_TITLE = 'y'

class Exposure_csv:

    ##
    # @brief Instantiate this instance.
    # @param file_name Name of underlying data file.
    # @param latitude_title ??
    # @param longitude_title ??
    # @param is_x_y_locations ??
    # @param x_title ??
    # @param y_title ??
    # @param refine_polygon ??
    # @param title_check_list ??
    def __init__(self, file_name, latitude_title=LAT_TITLE,
                 longitude_title=LONG_TITLE, is_x_y_locations=None,
                 x_title=X_TITLE, y_title=Y_TITLE,
                 refine_polygon=None, title_check_list=None):
        """
        This class is for handling the exposure csv file.
        It reads the file in and converts the lats and longs to a geospatial
        data object.
        Use the methods to read and write columns.

        The format of the csv files it reads is:
           The first row is a title row.
           Commas are the delimiters.
           Each column is a 'set' of data.

        Feel free to use/expand it to read other csv files.

        It is not for adding and deleting rows.

        Can geospatial handle string attributes? It's not made for them.
        Currently it can't load and save string attributes.

        So just use geospatial to hold the x, y and georef? Bad, since
        different attributes are in different structures.  Not so bad, the
        info to write if the .csv file is saved is in attribute_dic.

        The location info is in the geospatial attribute.
        """

        self._file_name = file_name
        self._geospatial = None

        # self._attribute_dic is a dictionary.
        # The keys are the column titles.
        # The values are lists of column data.

        # self._title_index_dic is a dictionary.
        # The keys are the column titles.
        # The values are the index positions of file columns.
        self._attribute_dic, self._title_index_dic = \
            csv2dict(self._file_name, title_check_list=title_check_list)
        try:
            # Have code here that handles caps or lower
            lats = self._attribute_dic[latitude_title]
            longs = self._attribute_dic[longitude_title]
        except KeyError:
            # maybe a warning..
            # Let's see if this works..
            if is_x_y_locations is not False:
                is_x_y_locations = True
            pass
        else:
            self._geospatial = Geospatial_data(latitudes=lats,
                                               longitudes=longs)

        if is_x_y_locations is True:
            if self._geospatial is not None:
                pass # FIXME: throw an error
            try:
                xs = self._attribute_dic[x_title]
                ys = self._attribute_dic[y_title]
                points = [[float(i),float(j)] for i,j in map(None,xs,ys)]
            except KeyError:
                # maybe a warning..
                msg = "Could not find location information."
                raise TitleValueError, msg
            else:
                self._geospatial = Geospatial_data(data_points=points)

        # create a list of points that are in the refining_polygon
        # described by a list of indexes representing the points

    ##
    # @brief Compare this object with another for equality of content.
    # @param self This object.
    # @param other The other object.
    # @return 0 if the objects hold the same data, non-zero otherwise.
    def __cmp__(self, other):
        # check that 'other' is an instance of this class
        if isinstance(self, type(other)):
            result = cmp(self._attribute_dic, other._attribute_dic)
            if result != 0:
                return result

            # The order of the columns is important. Therefore..
            result = cmp(self._title_index_dic, other._title_index_dic)
            if result != 0:
                return result
            for self_ls, other_ls in map(None, self._attribute_dic,
                                         other._attribute_dic):
                result = cmp(self._attribute_dic[self_ls],
                             other._attribute_dic[other_ls])
                if result != 0:
                    return result
            return 0
        else:
            return 1

    ##
    # @brief Get a list of column values given a column name.
    # @param column_name The name of the column to get values from.
    # @param use_refind_polygon Unused??
    def get_column(self, column_name, use_refind_polygon=False):
        """
        Given a column name return a list of the column values.

        Note, the type of the values will be String!
        Do this to change a list of strings to a list of floats:
            time = [float(x) for x in time]

        Not implemented:
        if use_refind_polygon is True, only return values in the
        refined polygon
        """

        if not self._attribute_dic.has_key(column_name):
            msg = 'There is no column called %s!' % column_name
            raise TitleValueError, msg

        return self._attribute_dic[column_name]

    ##
    # @brief Not implemented: interpolate a value from a known column.
    # @param value_column_name Column to return a value from.
    # @param known_column_name Column holding the known values.
    # @param known_values The known values to interpolate at.
    # @param use_refind_polygon ??
    def get_value(self, value_column_name, known_column_name,
                  known_values, use_refind_polygon=False):
        """
        Do linear interpolation on the known_column_name column, using the
        known_values, to return a value of the value_column_name column.
        """

        pass

    ##
    # @brief Get a geospatial object that describes the locations.
    # @param use_refind_polygon Unused??
    def get_location(self, use_refind_polygon=False):
        """
        Return a geospatial object which describes the
        locations of the location file.

        Note, if there is no location info, this returns None.

        Not implemented:
        if use_refind_polygon is True, only return values in the
        refined polygon
        """

        return self._geospatial

    ##
    # @brief Add column to 'end' of CSV data.
    # @param column_name The new column name.
    # @param column_values The new column values.
    # @param overwrite If True, an existing column may be overwritten.
    def set_column(self, column_name, column_values, overwrite=False):
        """
        Add a column to the 'end' (with the right most column being the end)
        of the csv file.

        Set overwrite to True if you want to overwrite a column.

        Note, in column_name white space is removed and case is not checked.
        Precondition:
        The column_name and column_values cannot contain commas.
        """

        # sanity checks
        value_row_count = \
                len(self._attribute_dic[self._title_index_dic.keys()[0]])
        if len(column_values) != value_row_count:
            msg = 'The number of column values must equal the number of rows.'
            raise DataMissingValuesError, msg

        # check new column name isn't already used, and we aren't overwriting
        if self._attribute_dic.has_key(column_name):
            if not overwrite:
                msg = 'Column name %s already in use!' % column_name
                raise TitleValueError, msg
        else:
            # New title.  Add it to the title index.
            self._title_index_dic[column_name] = len(self._title_index_dic)

        self._attribute_dic[column_name] = column_values

    ##
    # @brief Save the exposure CSV file.
    # @param file_name If supplied, use this filename, not original.
    def save(self, file_name=None):
        """
        Save the exposure csv file
        """

        if file_name is None:
            file_name = self._file_name

        fd = open(file_name, 'wb')
        writer = csv.writer(fd)

        # Write the title to a csv file
        line = [None] * len(self._title_index_dic)
        for title in self._title_index_dic.iterkeys():
            line[self._title_index_dic[title]] = title
        writer.writerow(line)

        # Write the values to a csv file
        value_row_count = \
                len(self._attribute_dic[self._title_index_dic.keys()[0]])
        for row_i in range(value_row_count):
            line = [None] * len(self._title_index_dic)
            for title in self._title_index_dic.iterkeys():
                line[self._title_index_dic[title]] = \
                     self._attribute_dic[title][row_i]
            writer.writerow(line)

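# Example (sketch; 'exposure.csv' is a hypothetical file with LATITUDE and
# LONGITUDE title columns, and 'DEPTH' a hypothetical attribute column):
#
#     >>> exp = Exposure_csv('exposure.csv')
#     >>> depths = [float(x) for x in exp.get_column('DEPTH')]
#     >>> exp.set_column('DAMAGE', ['0.0'] * len(depths))
#     >>> exp.save('exposure_with_damage.csv')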

##
# @brief Convert CSV file to a dictionary of arrays.
# @param file_name The path to the file to read.
def csv2array(file_name):
    """
    Convert CSV files of the form:

    time, discharge, velocity
    0.0,  1.2,       0.0
    0.1,  3.2,       1.1
    ...

    to a dictionary of numeric arrays.


    See underlying function csv2dict for more details.
    """

    X, _ = csv2dict(file_name)

    Y = {}
    for key in X.keys():
        Y[key] = array([float(x) for x in X[key]])

    return Y

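# Example (sketch, using the layout from the docstring above with a
# hypothetical file 'hydrograph.csv'):
#
#     >>> data = csv2array('hydrograph.csv')
#     >>> data['discharge']
#     array([ 1.2,  3.2, ...])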

##
# @brief Read a CSV file and convert to a dictionary of {key: column}.
# @param file_name The path to the file to read.
# @param title_check_list List of titles that *must* be columns in the file.
# @return Two dicts: ({key:column}, {title:index}).
# @note WARNING: Values are returned as strings.
def csv2dict(file_name, title_check_list=None):
    """
    Load in the csv as a dictionary, title as key and column info as value.
    Also, create a dictionary, title as key and column index as value,
    to keep track of the column order.

    Two dictionaries are returned.

    WARNING: Values are returned as strings.
             Do this to change a list of strings to a list of floats:
                 time = [float(x) for x in time]
    """

    # FIXME(Ole): Consider dealing with files without headers
    # FIXME(Ole): Consider a wrapper automatically converting text fields
    #             to the right type by trying for: int, float, string

    attribute_dic = {}
    title_index_dic = {}
    titles_stripped = [] # List of titles

    reader = csv.reader(file(file_name))

    # Read in and manipulate the title info
    titles = reader.next()
    for i, title in enumerate(titles):
        header = title.strip()
        titles_stripped.append(header)
        title_index_dic[header] = i
    title_count = len(titles_stripped)

    # Check required columns
    if title_check_list is not None:
        for title_check in title_check_list:
            if not title_index_dic.has_key(title_check):
                msg = 'Reading error. This column is not present: %s' \
                      % title_check
                raise IOError, msg

    # Create a dictionary of column values, indexed by column title
    for line in reader:
        n = len(line) # Number of entries
        if n != title_count:
            msg = 'Entry in file %s had %d columns ' % (file_name, n)
            msg += 'although there were %d headers' % title_count
            raise IOError, msg
        for i, value in enumerate(line):
            attribute_dic.setdefault(titles_stripped[i], []).append(value)

    return attribute_dic, title_index_dic

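# Example (sketch, same hypothetical 'hydrograph.csv' as above):
#     >>> columns, order = csv2dict('hydrograph.csv')
#     >>> columns['time']      # values come back as strings
#     ['0.0', '0.1', ...]
#     >>> order['time']
#     0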

##
# @brief Store x,y,z vectors as a triangulation in an OBJ file.
# @param filename Output filename; '.obj' is appended if not present.
# @param x
# @param y
# @param z
def write_obj(filename, x, y, z):
    """Store x,y,z vectors into filename (obj format).

       Vectors are assumed to have dimension (M,3) where
       M corresponds to the number of elements.
       Triangles are assumed to be disconnected.

       The three numbers in each vector correspond to three vertices,

       e.g. the x coordinate of vertex 1 of element i is in x[i,1]
    """

    import os.path

    root, ext = os.path.splitext(filename)
    if ext == '.obj':
        FN = filename
    else:
        FN = filename + '.obj'

    outfile = open(FN, 'wb')
    outfile.write("# Triangulation as an obj file\n")

    M, N = x.shape
    assert N == 3  # Assuming three vertices per element

    for i in range(M):
        for j in range(N):
            outfile.write("v %f %f %f\n" % (x[i,j], y[i,j], z[i,j]))

    for i in range(M):
        base = i * N
        outfile.write("f %d %d %d\n" % (base+1, base+2, base+3))

    outfile.close()

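# Example: write a single triangle with vertices (0,0,0), (1,0,0), (0,1,1).
#     >>> x = array([[0.0, 1.0, 0.0]])
#     >>> y = array([[0.0, 0.0, 1.0]])
#     >>> z = array([[0.0, 0.0, 1.0]])
#     >>> write_obj('single_triangle', x, y, z)   # creates single_triangle.obj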

#########################################################
# Conversion routines
#########################################################

##
# @brief Convert SWW data to OBJ data.
# @param basefilename Stem of filename, needs size and extension added.
# @param size The number of lines to write.
def sww2obj(basefilename, size):
    """Convert netcdf based data output to obj
    """

    from Scientific.IO.NetCDF import NetCDFFile
    from Numeric import Float, zeros

    # Get NetCDF
    FN = create_filename('.', basefilename, 'sww', size)
    print 'Reading from ', FN
    fid = NetCDFFile(FN, netcdf_mode_r)  # Open existing file for read

    # Get the variables
    x = fid.variables['x']
    y = fid.variables['y']
    z = fid.variables['elevation']
    time = fid.variables['time']
    stage = fid.variables['stage']

    M = size  # Number of lines
    xx = zeros((M,3), Float)
    yy = zeros((M,3), Float)
    zz = zeros((M,3), Float)

    for i in range(M):
        for j in range(3):
            xx[i,j] = x[i+j*M]
            yy[i,j] = y[i+j*M]
            zz[i,j] = z[i+j*M]

    # Write obj for bathymetry
    FN = create_filename('.', basefilename, 'obj', size)
    write_obj(FN,xx,yy,zz)

    # Now read all the data with variable information, combine with
    # x,y info and store as obj
    for k in range(len(time)):
        t = time[k]
        print 'Processing timestep %f' % t

        for i in range(M):
            for j in range(3):
                zz[i,j] = stage[k,i+j*M]

        # Write obj for variable data
        #FN = create_filename(basefilename, 'obj', size, time=t)
        FN = create_filename('.', basefilename[:5], 'obj', size, time=t)
        write_obj(FN, xx, yy, zz)


##
# @brief Convert line based data output to OBJ data.
# @param basefilename Stem of filename, needs size and extension added.
def dat2obj(basefilename):
    """Convert line based data output to obj
    FIXME: Obsolete?
    """

    import glob, os
    from anuga.config import data_dir
    from Numeric import zeros, Float

    # Get bathymetry and x,y's
    lines = open(data_dir+os.sep+basefilename+'_geometry.dat', 'r').readlines()

    M = len(lines)  # Number of lines
    x = zeros((M,3), Float)
    y = zeros((M,3), Float)
    z = zeros((M,3), Float)

    for i, line in enumerate(lines):
        tokens = line.split()
        values = map(float, tokens)

        for j in range(3):
            x[i,j] = values[j*3]
            y[i,j] = values[j*3+1]
            z[i,j] = values[j*3+2]

    # Write obj for bathymetry
    write_obj(data_dir + os.sep + basefilename + '_geometry', x, y, z)

    # Now read all the data files with variable information, combine with
    # x,y info and store as obj.

    files = glob.glob(data_dir + os.sep + basefilename + '*.dat')
    for filename in files:
        print 'Processing %s' % filename

        # Note: glob returns full paths, so open filename directly
        lines = open(filename, 'r').readlines()
        assert len(lines) == M
        root, ext = os.path.splitext(filename)

        # Get time from filename
        i0 = filename.find('_time=')
        if i0 == -1:
            # Skip bathymetry file
            continue

        i0 += 6  # Position where time starts
        i1 = filename.find('.dat')

        if i1 > i0:
            t = float(filename[i0:i1])
        else:
            raise DataTimeError, 'Hmmmm'

        for i, line in enumerate(lines):
            tokens = line.split()
            values = map(float,tokens)

            for j in range(3):
                z[i,j] = values[j]

        # Write obj for variable data
        write_obj(data_dir + os.sep + basefilename + '_time=%.4f' % t, x, y, z)


##
# @brief Filter data file, selecting timesteps first:step:last.
# @param filename1 Data file to filter.
# @param filename2 File to write filtered timesteps to.
# @param first First timestep.
# @param last Last timestep.
# @param step Timestep stride.
def filter_netcdf(filename1, filename2, first=0, last=None, step=1):
    """Read NetCDF filename1, pick timesteps first:step:last and save to
    NetCDF file filename2
    """

    from Scientific.IO.NetCDF import NetCDFFile

    # Get NetCDF
    infile = NetCDFFile(filename1, netcdf_mode_r)  # Open existing file for read
    outfile = NetCDFFile(filename2, netcdf_mode_w)  # Open new file

    # Copy dimensions
    for d in infile.dimensions:
        outfile.createDimension(d, infile.dimensions[d])

    # Copy variable definitions
    for name in infile.variables:
        var = infile.variables[name]
        outfile.createVariable(name, var.typecode(), var.dimensions)

    # Copy the static variables
    for name in infile.variables:
        if name == 'time' or name == 'stage':
            pass
        else:
            outfile.variables[name][:] = infile.variables[name][:]

    # Copy selected timesteps
    time = infile.variables['time']
    stage = infile.variables['stage']

    newtime = outfile.variables['time']
    newstage = outfile.variables['stage']

    if last is None:
        last = len(time)

    selection = range(first, last, step)
    for i, j in enumerate(selection):
        print 'Copying timestep %d of %d (%f)' % (j, last-first, time[j])
        newtime[i] = time[j]
        newstage[i,:] = stage[j,:]

    # Close
    infile.close()
    outfile.close()

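# Example (sketch): keep every second timestep of a hypothetical file.
#     >>> filter_netcdf('runup.sww', 'runup_coarse.sww', first=0, step=2)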

##
# @brief Return instance of class of given format using filename.
# @param domain Data domain (eg, 'sww', etc).
# @param mode The mode to open domain in.
# @return A class instance of required domain and mode.
# Get data objects
def get_dataobject(domain, mode=netcdf_mode_w):
    """Return instance of class of given format using filename
    """

    cls = eval('Data_format_%s' % domain.format)
    return cls(domain, mode)

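# Example (sketch; assumes an existing Domain instance whose format
# attribute is 'sww', so the lookup resolves to Data_format_sww):
#     >>> writer = get_dataobject(domain)
#     >>> writer.store_connectivity()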

##
# @brief Convert DEM data to PTS data.
# @param basename_in Stem of input filename.
# @param basename_out Stem of output filename.
# @param easting_min
# @param easting_max
# @param northing_min
# @param northing_max
# @param use_cache
# @param verbose
# @return
def dem2pts(basename_in, basename_out=None,
            easting_min=None, easting_max=None,
            northing_min=None, northing_max=None,
            use_cache=False, verbose=False):
    """Read Digital Elevation model from the following NetCDF format (.dem)

    Example:

    ncols         3121
    nrows         1800
    xllcorner     722000
    yllcorner     5893000
    cellsize      25
    NODATA_value  -9999
    138.3698 137.4194 136.5062 135.5558 ..........

    Convert to NetCDF pts format which is

    points:  (Nx2) Float array
    elevation: N Float array
    """

    kwargs = {'basename_out': basename_out,
              'easting_min': easting_min,
              'easting_max': easting_max,
              'northing_min': northing_min,
              'northing_max': northing_max,
              'verbose': verbose}

    if use_cache is True:
        from caching import cache
        result = cache(_dem2pts, basename_in, kwargs,
                       dependencies = [basename_in + '.dem'],
                       verbose = verbose)

    else:
        result = apply(_dem2pts, [basename_in], kwargs)

    return result

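# Example (sketch; reads topo.dem and writes topo.pts, clipped to a
# bounding box in UTM coordinates):
#     >>> dem2pts('topo', easting_min=722000, easting_max=725000,
#     ...         northing_min=5893000, northing_max=5895000, verbose=True)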
1371
1372##
1373# @brief
1374# @param basename_in
1375# @param basename_out
1376# @param verbose
1377# @param easting_min
1378# @param easting_max
1379# @param northing_min
1380# @param northing_max
1381def _dem2pts(basename_in, basename_out=None, verbose=False,
1382            easting_min=None, easting_max=None,
1383            northing_min=None, northing_max=None):
1384    """Read Digitial Elevation model from the following NetCDF format (.dem)
1385
1386    Internal function. See public function dem2pts for details.
1387    """
1388
1389    # FIXME: Can this be written feasibly using write_pts?
1390
1391    import os
1392    from Scientific.IO.NetCDF import NetCDFFile
1393    from Numeric import Float, zeros, reshape, sum
1394
1395    root = basename_in
1396
1397    # Get NetCDF
1398    infile = NetCDFFile(root + '.dem', netcdf_mode_r) 
1399
1400    if verbose: print 'Reading DEM from %s' %(root + '.dem')
1401
1402    ncols = infile.ncols[0]
1403    nrows = infile.nrows[0]
1404    xllcorner = infile.xllcorner[0]  # Easting of lower left corner
1405    yllcorner = infile.yllcorner[0]  # Northing of lower left corner
1406    cellsize = infile.cellsize[0]
1407    NODATA_value = infile.NODATA_value[0]
1408    dem_elevation = infile.variables['elevation']
1409
1410    zone = infile.zone[0]
1411    false_easting = infile.false_easting[0]
1412    false_northing = infile.false_northing[0]
1413
1414    # Text strings
1415    projection = infile.projection
1416    datum = infile.datum
1417    units = infile.units
1418
1419    # Get output file
1420    if basename_out == None:
1421        ptsname = root + '.pts'
1422    else:
1423        ptsname = basename_out + '.pts'
1424
1425    if verbose: print 'Store to NetCDF file %s' %ptsname
1426
1427    # NetCDF file definition
1428    outfile = NetCDFFile(ptsname, netcdf_mode_w)
1429
1430    # Create new file
1431    outfile.institution = 'Geoscience Australia'
1432    outfile.description = 'NetCDF pts format for compact and portable ' \
1433                          'storage of spatial point data'
1434
1435    # Assign default values
1436    if easting_min is None: easting_min = xllcorner
1437    if easting_max is None: easting_max = xllcorner + ncols*cellsize
1438    if northing_min is None: northing_min = yllcorner
1439    if northing_max is None: northing_max = yllcorner + nrows*cellsize
1440
1441    # Compute offsets to update georeferencing
1442    easting_offset = xllcorner - easting_min
1443    northing_offset = yllcorner - northing_min
1444
1445    # Georeferencing
1446    outfile.zone = zone
1447    outfile.xllcorner = easting_min # Easting of lower left corner
1448    outfile.yllcorner = northing_min # Northing of lower left corner
1449    outfile.false_easting = false_easting
1450    outfile.false_northing = false_northing
1451
1452    outfile.projection = projection
1453    outfile.datum = datum
1454    outfile.units = units
1455
1456    # Grid info (FIXME: probably not going to be used, but heck)
1457    outfile.ncols = ncols
1458    outfile.nrows = nrows
1459
1460    dem_elevation_r = reshape(dem_elevation, (nrows, ncols))
1461    totalnopoints = nrows*ncols
1462
1463    # Calculating number of NODATA_values for each row in clipped region
1464    # FIXME: use array operations to do faster
1465    nn = 0
1466    k = 0
1467    i1_0 = 0
1468    j1_0 = 0
1469    thisj = 0
1470    thisi = 0
1471    for i in range(nrows):
1472        y = (nrows-i-1)*cellsize + yllcorner
1473        for j in range(ncols):
1474            x = j*cellsize + xllcorner
1475            if easting_min <= x <= easting_max \
1476               and northing_min <= y <= northing_max:
1477                thisj = j
1478                thisi = i
1479                if dem_elevation_r[i,j] == NODATA_value:
1480                    nn += 1
1481
1482                if k == 0:
1483                    i1_0 = i
1484                    j1_0 = j
1485
1486                k += 1
1487
1488    index1 = j1_0
1489    index2 = thisj
1490
1491    # Dimension definitions
1492    nrows_in_bounding_box = int(round((northing_max-northing_min)/cellsize))
1493    ncols_in_bounding_box = int(round((easting_max-easting_min)/cellsize))
1494
1495    clippednopoints = (thisi+1-i1_0)*(thisj+1-j1_0)
1496    nopoints = clippednopoints-nn
1497
1498    clipped_dem_elev = dem_elevation_r[i1_0:thisi+1,j1_0:thisj+1]
1499
1500    if verbose:
1501        print 'There are %d values in the elevation' %totalnopoints
1502        print 'There are %d values in the clipped elevation' %clippednopoints
1503        print 'There are %d NODATA_values in the clipped elevation' %nn
1504
1505    outfile.createDimension('number_of_points', nopoints)
1506    outfile.createDimension('number_of_dimensions', 2) #This is 2d data
1507
1508    # Variable definitions
1509    outfile.createVariable('points', Float, ('number_of_points',
1510                                             'number_of_dimensions'))
1511    outfile.createVariable('elevation', Float, ('number_of_points',))
1512
1513    # Get handles to the variables
1514    points = outfile.variables['points']
1515    elevation = outfile.variables['elevation']
1516
1517    lenv = index2-index1+1
1518
1519    # Store data
1520    global_index = 0
1521    # for i in range(nrows):
1522    for i in range(i1_0, thisi+1, 1):
1523        if verbose and i % ((nrows+10)/10) == 0:
1524            print 'Processing row %d of %d' % (i, nrows)
1525
1526        lower_index = global_index
1527
1528        v = dem_elevation_r[i,index1:index2+1]
1529        no_NODATA = sum(v == NODATA_value)
1530        if no_NODATA > 0:
1531            newcols = lenv - no_NODATA  # ncols_in_bounding_box - no_NODATA
1532        else:
1533            newcols = lenv              # ncols_in_bounding_box
1534
1535        telev = zeros(newcols, Float)
1536        tpoints = zeros((newcols, 2), Float)
1537
1538        local_index = 0
1539
1540        y = (nrows-i-1)*cellsize + yllcorner
1541        #for j in range(ncols):
1542        for j in range(j1_0,index2+1,1):
1543            x = j*cellsize + xllcorner
1544            if easting_min <= x <= easting_max \
1545               and northing_min <= y <= northing_max \
1546               and dem_elevation_r[i,j] <> NODATA_value:
1547                tpoints[local_index, :] = [x-easting_min, y-northing_min]
1548                telev[local_index] = dem_elevation_r[i, j]
1549                global_index += 1
1550                local_index += 1
1551
1552        upper_index = global_index
1553
1554        if upper_index == lower_index + newcols:
1555            points[lower_index:upper_index, :] = tpoints
1556            elevation[lower_index:upper_index] = telev
1557
1558    assert global_index == nopoints, 'index not equal to number of points'
1559
1560    infile.close()
1561    outfile.close()
1562
1563
1564##
# @brief Yield a block of surface line points for each cross section.
# @param lines Iterable of text lines to process.
1567# @note BROKEN?  UNUSED?
1568def _read_hecras_cross_sections(lines):
1569    """Return block of surface lines for each cross section
1570    Starts with SURFACE LINE,
1571    Ends with END CROSS-SECTION
1572    """
1573
1574    points = []
1575
1576    reading_surface = False
1577    for i, line in enumerate(lines):
1578        if len(line.strip()) == 0:    #Ignore blanks
1579            continue
1580
        if line.strip().startswith('SURFACE LINE'):
            reading_surface = True
            continue

        if line.strip().startswith('END') and reading_surface:
1586            yield points
1587            reading_surface = False
1588            points = []
1589
1590        if reading_surface:
1591            fields = line.strip().split(',')
1592            easting = float(fields[0])
1593            northing = float(fields[1])
1594            elevation = float(fields[2])
1595            points.append([easting, northing, elevation])
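

# Illustrative use of _read_hecras_cross_sections with made-up lines
# (real input comes from an .sdf file as in hecras_cross_sections2pts):
#
#   lines = ['SURFACE LINE:',
#            ' 405548.67,   6438142.76,   35.37',
#            ' 405552.24,   6438146.28,   35.41',
#            'END CROSS-SECTION']
#   for block in _read_hecras_cross_sections(lines):
#       print block  # [[405548.67, 6438142.76, 35.37], [405552.24, ...]]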
1596
1597
1598##
1599# @brief Convert HECRAS (.sdf) file to PTS file.
# @param basename_in Stem of input filename.
# @param basename_out Stem of output filename.
1602# @param verbose True if this function is to be verbose.
1603def hecras_cross_sections2pts(basename_in,
1604                              basename_out=None,
1605                              verbose=False):
1606    """Read HEC-RAS Elevation datal from the following ASCII format (.sdf)
1607
1608    Example:
1609
1610# RAS export file created on Mon 15Aug2005 11:42
1611# by HEC-RAS Version 3.1.1
1612
1613BEGIN HEADER:
1614  UNITS: METRIC
1615  DTM TYPE: TIN
1616  DTM: v:\1\cit\perth_topo\river_tin
1617  STREAM LAYER: c:\local\hecras\21_02_03\up_canning_cent3d.shp
1618  CROSS-SECTION LAYER: c:\local\hecras\21_02_03\up_can_xs3d.shp
1619  MAP PROJECTION: UTM
1620  PROJECTION ZONE: 50
1621  DATUM: AGD66
1622  VERTICAL DATUM:
1623  NUMBER OF REACHES:  19
1624  NUMBER OF CROSS-SECTIONS:  14206
1625END HEADER:
1626
1627Only the SURFACE LINE data of the following form will be utilised
1628  CROSS-SECTION:
1629    STREAM ID:Southern-Wungong
1630    REACH ID:Southern-Wungong
1631    STATION:19040.*
1632    CUT LINE:
1633      405548.671603161 , 6438142.7594925
1634      405734.536092045 , 6438326.10404912
1635      405745.130459356 , 6438331.48627354
1636      405813.89633823 , 6438368.6272789
1637    SURFACE LINE:
1638     405548.67,   6438142.76,   35.37
1639     405552.24,   6438146.28,   35.41
1640     405554.78,   6438148.78,   35.44
1641     405555.80,   6438149.79,   35.44
1642     405559.37,   6438153.31,   35.45
1643     405560.88,   6438154.81,   35.44
1644     405562.93,   6438156.83,   35.42
1645     405566.50,   6438160.35,   35.38
1646     405566.99,   6438160.83,   35.37
1647     ...
1648   END CROSS-SECTION
1649
1650    Convert to NetCDF pts format which is
1651
1652    points:  (Nx2) Float array
1653    elevation: N Float array
1654    """
1655
1656    import os
1657    from Scientific.IO.NetCDF import NetCDFFile
1658    from Numeric import Float, zeros, reshape
1659
1660    root = basename_in
1661
1662    # Get ASCII file
1663    infile = open(root + '.sdf', 'r')
1664
1665    if verbose: print 'Reading DEM from %s' %(root + '.sdf')
1666
1667    lines = infile.readlines()
1668    infile.close()
1669
1670    if verbose: print 'Converting to pts format'
1671
1672    # Scan through the header, picking up stuff we need.
1673    i = 0
1674    while lines[i].strip() == '' or lines[i].strip().startswith('#'):
1675        i += 1
1676
1677    assert lines[i].strip().upper() == 'BEGIN HEADER:'
1678    i += 1
1679
1680    assert lines[i].strip().upper().startswith('UNITS:')
1681    units = lines[i].strip().split()[1]
1682    i += 1
1683
1684    assert lines[i].strip().upper().startswith('DTM TYPE:')
1685    i += 1
1686
1687    assert lines[i].strip().upper().startswith('DTM:')
1688    i += 1
1689
1690    assert lines[i].strip().upper().startswith('STREAM')
1691    i += 1
1692
1693    assert lines[i].strip().upper().startswith('CROSS')
1694    i += 1
1695
1696    assert lines[i].strip().upper().startswith('MAP PROJECTION:')
1697    projection = lines[i].strip().split(':')[1]
1698    i += 1
1699
1700    assert lines[i].strip().upper().startswith('PROJECTION ZONE:')
1701    zone = int(lines[i].strip().split(':')[1])
1702    i += 1
1703
1704    assert lines[i].strip().upper().startswith('DATUM:')
1705    datum = lines[i].strip().split(':')[1]
1706    i += 1
1707
1708    assert lines[i].strip().upper().startswith('VERTICAL DATUM:')
1709    i += 1
1710
1711    assert lines[i].strip().upper().startswith('NUMBER OF REACHES:')
1712    i += 1
1713
1714    assert lines[i].strip().upper().startswith('NUMBER OF CROSS-SECTIONS:')
1715    number_of_cross_sections = int(lines[i].strip().split(':')[1])
1716    i += 1
1717
1718    # Now read all points
1719    points = []
1720    elevation = []
1721    for j, entries in enumerate(_read_hecras_cross_sections(lines[i:])):
1722        for k, entry in enumerate(entries):
1723            points.append(entry[:2])
1724            elevation.append(entry[2])
1725
    msg = 'Actual number of cross sections == %d, reported as %d' \
          % (j+1, number_of_cross_sections)
1728    assert j+1 == number_of_cross_sections, msg
1729
1730    # Get output file, write PTS data
    if basename_out is None:
1732        ptsname = root + '.pts'
1733    else:
1734        ptsname = basename_out + '.pts'
1735
1736    geo_ref = Geo_reference(zone, 0, 0, datum, projection, units)
1737    geo = Geospatial_data(points, {"elevation":elevation},
1738                          verbose=verbose, geo_reference=geo_ref)
1739    geo.export_points_file(ptsname)
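

# Typical use of hecras_cross_sections2pts (filenames illustrative):
#
#   hecras_cross_sections2pts('up_canning', verbose=True)
#   # Reads up_canning.sdf and writes up_canning.pts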
1740
1741
1742##
1743# @brief
1744# @param basename_in
1745# @param extra_name_out
1746# @param quantities
1747# @param timestep
1748# @param reduction
1749# @param cellsize
1750# @param number_of_decimal_places
1751# @param NODATA_value
1752# @param easting_min
1753# @param easting_max
1754# @param northing_min
1755# @param northing_max
1756# @param verbose
1757# @param origin
1758# @param datum
1759# @param format
1760# @return
1761def export_grid(basename_in, extra_name_out=None,
1762                quantities=None, # defaults to elevation
1763                timestep=None,
1764                reduction=None,
1765                cellsize=10,
1766                number_of_decimal_places=None,
1767                NODATA_value=-9999,
1768                easting_min=None,
1769                easting_max=None,
1770                northing_min=None,
1771                northing_max=None,
1772                verbose=False,
1773                origin=None,
1774                datum='WGS84',
1775                format='ers'):
1776    """Wrapper for sww2dem.
1777    See sww2dem to find out what most of the parameters do.
1778
1779    Quantities is a list of quantities.  Each quantity will be
1780    calculated for each sww file.
1781
    Returns the basenames of the files produced, each made up of the
    directory and the file name, without the extension.

    One output file is produced per input sww file per quantity.
1788    """
1789
1790    if quantities is None:
1791        quantities = ['elevation']
1792
1793    if type(quantities) is str:
        quantities = [quantities]
1795
1796    # How many sww files are there?
1797    dir, base = os.path.split(basename_in)
1798
1799    iterate_over = get_all_swwfiles(dir, base, verbose)
1800
1801    if dir == "":
1802        dir = "." # Unix compatibility
1803
1804    files_out = []
1805    for sww_file in iterate_over:
1806        for quantity in quantities:
1807            if extra_name_out is None:
1808                basename_out = sww_file + '_' + quantity
1809            else:
1810                basename_out = sww_file + '_' + quantity + '_' + extra_name_out
1811
1812            file_out = sww2dem(dir+sep+sww_file, dir+sep+basename_out,
1813                               quantity,
1814                               timestep,
1815                               reduction,
1816                               cellsize,
1817                               number_of_decimal_places,
1818                               NODATA_value,
1819                               easting_min,
1820                               easting_max,
1821                               northing_min,
1822                               northing_max,
1823                               verbose,
1824                               origin,
1825                               datum,
1826                               format)
1827            files_out.append(file_out)
1828    return files_out
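

# Example use of export_grid (filenames illustrative): write an ASCII grid
# of stage and one of elevation for every sww file starting with 'cairns':
#
#   files = export_grid('cairns', quantities=['stage', 'elevation'],
#                       cellsize=25, format='asc', verbose=True)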
1829
1830
1831##
1832# @brief
1833# @param production_dirs
1834# @param output_dir
1835# @param scenario_name
1836# @param gauges_dir_name
1837# @param plot_quantity
1838# @param generate_fig
1839# @param reportname
1840# @param surface
1841# @param time_min
1842# @param time_max
1843# @param title_on
1844# @param verbose
1845# @param nodes
1846def get_timeseries(production_dirs, output_dir, scenario_name, gauges_dir_name,
1847                   plot_quantity, generate_fig=False,
1848                   reportname=None, surface=False, time_min=None,
1849                   time_max=None, title_on=False, verbose=True,
1850                   nodes=None):
1851    """
1852    nodes - number of processes used.
1853
1854    warning - this function has no tests
1855    """
1856
    if reportname is None:
1858        report = False
1859    else:
1860        report = True
1861
1862    if nodes is None:
1863        is_parallel = False
1864    else:
1865        is_parallel = True
1866
1867    # Generate figures
1868    swwfiles = {}
    if is_parallel is True:
        base_reportname = reportname
        for i in range(nodes):
            print 'Sending node %d of %d' % (i, nodes)
            swwfiles = {}
            if base_reportname is not None:
                reportname = base_reportname + '_%s' % i
1875            for label_id in production_dirs.keys():
1876                if label_id == 'boundaries':
1877                    swwfile = best_boundary_sww
1878                else:
1879                    file_loc = output_dir + label_id + sep
1880                    sww_extra = '_P%s_%s' % (i, nodes)
1881                    swwfile = file_loc + scenario_name + sww_extra + '.sww'
1882                    print 'swwfile', swwfile
1883                    swwfiles[swwfile] = label_id
1884
            texname, elev_output = sww2timeseries(swwfiles,
                                                  gauges_dir_name,
                                                  production_dirs,
                                                  report=report,
                                                  reportname=reportname,
                                                  plot_quantity=plot_quantity,
                                                  generate_fig=generate_fig,
                                                  surface=surface,
                                                  time_min=time_min,
                                                  time_max=time_max,
                                                  title_on=title_on,
                                                  verbose=verbose)
1897    else:
1898        for label_id in production_dirs.keys():
1899            if label_id == 'boundaries':
1900                print 'boundaries'
1901                file_loc = project.boundaries_in_dir
1902                swwfile = project.boundaries_dir_name3 + '.sww'
1904            else:
1905                file_loc = output_dir + label_id + sep
1906                swwfile = file_loc + scenario_name + '.sww'
1907            swwfiles[swwfile] = label_id
1908
1909        texname, elev_output = sww2timeseries(swwfiles,
1910                                              gauges_dir_name,
1911                                              production_dirs,
1912                                              report=report,
1913                                              reportname=reportname,
1914                                              plot_quantity=plot_quantity,
1915                                              generate_fig=generate_fig,
1916                                              surface=surface,
1917                                              time_min=time_min,
1918                                              time_max=time_max,
1919                                              title_on=title_on,
1920                                              verbose=verbose)
1921
1922
1923##
1924# @brief Convert SWW file to DEM file (.asc or .ers).
1925# @param basename_in
1926# @param basename_out
1927# @param quantity
1928# @param timestep
1929# @param reduction
1930# @param cellsize 
1931# @param number_of_decimal_places
1932# @param NODATA_value
1933# @param easting_min
1934# @param easting_max
1935# @param northing_min
1936# @param northing_max
1937# @param verbose
1938# @param origin
1939# @param datum
1940# @param format
1941# @return
1942def sww2dem(basename_in, basename_out=None,
1943            quantity=None, # defaults to elevation
1944            timestep=None,
1945            reduction=None,
1946            cellsize=10,
1947            number_of_decimal_places=None,
1948            NODATA_value=-9999,
1949            easting_min=None,
1950            easting_max=None,
1951            northing_min=None,
1952            northing_max=None,
1953            verbose=False,
1954            origin=None,
1955            datum='WGS84',
1956            format='ers'):
1957    """Read SWW file and convert to Digitial Elevation model format
1958    (.asc or .ers)
1959
1960    Example (ASC):
1961    ncols         3121
1962    nrows         1800
1963    xllcorner     722000
1964    yllcorner     5893000
1965    cellsize      25
1966    NODATA_value  -9999
1967    138.3698 137.4194 136.5062 135.5558 ..........
1968
    The number of decimal places written can be controlled via
    number_of_decimal_places to reduce disk space requirements.
1971
    For the asc format, also write an accompanying file with same
    basename_out but extension .prj used to fix the UTM zone, datum,
    false northings and eastings.
1974
1975    The prj format is assumed to be as
1976
1977    Projection    UTM
1978    Zone          56
1979    Datum         WGS84
1980    Zunits        NO
1981    Units         METERS
1982    Spheroid      WGS84
1983    Xshift        0.0000000000
1984    Yshift        10000000.0000000000
1985    Parameters
1986
1987
1988    The parameter quantity must be the name of an existing quantity or
1989    an expression involving existing quantities. The default is
1990    'elevation'. Quantity is not a list of quantities.
1991
    If timestep (an index) is given, output quantity at that timestep.

    If reduction is given, use that to reduce quantity over all timesteps.

    datum is the geodetic datum written to the output headers (default WGS84).

    format can be either 'asc' or 'ers'.
1999    """
2000
2001    import sys
2002    from Numeric import array, Float, concatenate, NewAxis, zeros, reshape
2003    from Numeric import array2string, sometrue
2004
2005    from anuga.utilities.polygon import inside_polygon, outside_polygon, \
2006         separate_points_by_polygon
2007    from anuga.abstract_2d_finite_volumes.util import \
2008         apply_expression_to_dictionary
2009
2010    msg = 'Format must be either asc or ers'
2011    assert format.lower() in ['asc', 'ers'], msg
2012
2013    false_easting = 500000
2014    false_northing = 10000000
2015
2016    if quantity is None:
2017        quantity = 'elevation'
2018
2019    if reduction is None:
2020        reduction = max
2021
2022    if basename_out is None:
2023        basename_out = basename_in + '_%s' % quantity
2024
2025    if quantity_formula.has_key(quantity):
2026        quantity = quantity_formula[quantity]
2027
2028    if number_of_decimal_places is None:
2029        number_of_decimal_places = 3
2030
2031    swwfile = basename_in + '.sww'
2032    demfile = basename_out + '.' + format
    # Note the use of a .ers extension is optional (write_ermapper_grid will
    # deal with either option)
2035
2036    # Read sww file
2037    if verbose:
2038        print 'Reading from %s' % swwfile
        print 'Output basename is %s' % basename_out
2040
2041    from Scientific.IO.NetCDF import NetCDFFile
2042    fid = NetCDFFile(swwfile)
2043
2044    #Get extent and reference
2045    x = fid.variables['x'][:]
2046    y = fid.variables['y'][:]
2047    volumes = fid.variables['volumes'][:]
2048    if timestep is not None:
2049        times = fid.variables['time'][timestep]
2050    else:
2051        times = fid.variables['time'][:]
2052
2053    number_of_timesteps = fid.dimensions['number_of_timesteps']
2054    number_of_points = fid.dimensions['number_of_points']
2055
2056    if origin is None:
2057        # Get geo_reference
2058        # sww files don't have to have a geo_ref
2059        try:
2060            geo_reference = Geo_reference(NetCDFObject=fid)
2061        except AttributeError, e:
2062            geo_reference = Geo_reference() # Default georef object
2063
2064        xllcorner = geo_reference.get_xllcorner()
2065        yllcorner = geo_reference.get_yllcorner()
2066        zone = geo_reference.get_zone()
2067    else:
2068        zone = origin[0]
2069        xllcorner = origin[1]
2070        yllcorner = origin[2]
2071
2072    # FIXME: Refactor using code from Interpolation_function.statistics
2073    # (in interpolate.py)
2074    # Something like print swwstats(swwname)
2075    if verbose:
2076        print '------------------------------------------------'
2077        print 'Statistics of SWW file:'
2078        print '  Name: %s' %swwfile
2079        print '  Reference:'
2080        print '    Lower left corner: [%f, %f]'\
2081              %(xllcorner, yllcorner)
2082        if timestep is not None:
2083            print '    Time: %f' %(times)
2084        else:
2085            print '    Start time: %f' %fid.starttime[0]
2086        print '  Extent:'
2087        print '    x [m] in [%f, %f], len(x) == %d'\
2088              %(min(x.flat), max(x.flat), len(x.flat))
2089        print '    y [m] in [%f, %f], len(y) == %d'\
2090              %(min(y.flat), max(y.flat), len(y.flat))
2091        if timestep is not None:
2092            print '    t [s] = %f, len(t) == %d' %(times, 1)
2093        else:
2094            print '    t [s] in [%f, %f], len(t) == %d'\
2095              %(min(times), max(times), len(times))
2096        print '  Quantities [SI units]:'
2097        # Comment out for reduced memory consumption
2098        for name in ['stage', 'xmomentum', 'ymomentum']:
2099            q = fid.variables[name][:].flat
2100            if timestep is not None:
2101                q = q[timestep*len(x):(timestep+1)*len(x)]
2102            if verbose: print '    %s in [%f, %f]' %(name, min(q), max(q))
2103        for name in ['elevation']:
2104            q = fid.variables[name][:].flat
2105            if verbose: print '    %s in [%f, %f]' %(name, min(q), max(q))
2106
2107    # Get quantity and reduce if applicable
2108    if verbose: print 'Processing quantity %s' %quantity
2109
2110    # Turn NetCDF objects into Numeric arrays
2111    try:
2112        q = fid.variables[quantity][:]
2113    except:
2114        quantity_dict = {}
2115        for name in fid.variables.keys():
2116            quantity_dict[name] = fid.variables[name][:]
2117        #Convert quantity expression to quantities found in sww file
2118        q = apply_expression_to_dictionary(quantity, quantity_dict)
2119
2120    if len(q.shape) == 2:
        # q has a time component and must be reduced along the temporal dimension
2122        if verbose: print 'Reducing quantity %s' %quantity
2123        q_reduced = zeros(number_of_points, Float)
2124
2125        if timestep is not None:
2126            for k in range(number_of_points):
2127                q_reduced[k] = q[timestep,k]
2128        else:
2129            for k in range(number_of_points):
2130                q_reduced[k] = reduction( q[:,k] )
2131
2132        q = q_reduced
2133
2134    #Post condition: Now q has dimension: number_of_points
2135    assert len(q.shape) == 1
2136    assert q.shape[0] == number_of_points
2137
2138    if verbose:
2139        print 'Processed values for %s are in [%f, %f]' % \
2140              (quantity, min(q), max(q))
2141
2142    #Create grid and update xll/yll corner and x,y
2143    #Relative extent
2144    if easting_min is None:
2145        xmin = min(x)
2146    else:
2147        xmin = easting_min - xllcorner
2148
2149    if easting_max is None:
2150        xmax = max(x)
2151    else:
2152        xmax = easting_max - xllcorner
2153
2154    if northing_min is None:
2155        ymin = min(y)
2156    else:
2157        ymin = northing_min - yllcorner
2158
2159    if northing_max is None:
2160        ymax = max(y)
2161    else:
2162        ymax = northing_max - yllcorner
2163
2164    msg = 'xmax must be greater than or equal to xmin.\n'
2165    msg += 'I got xmin = %f, xmax = %f' %(xmin, xmax)
2166    assert xmax >= xmin, msg
2167
    msg = 'ymax must be greater than or equal to ymin.\n'
2169    msg += 'I got ymin = %f, ymax = %f' %(ymin, ymax)
2170    assert ymax >= ymin, msg
2171
2172    if verbose: print 'Creating grid'
2173    ncols = int((xmax-xmin)/cellsize) + 1
2174    nrows = int((ymax-ymin)/cellsize) + 1
2175
2176    #New absolute reference and coordinates
2177    newxllcorner = xmin + xllcorner
2178    newyllcorner = ymin + yllcorner
2179
2180    x = x + xllcorner - newxllcorner
2181    y = y + yllcorner - newyllcorner
2182
2183    vertex_points = concatenate ((x[:,NewAxis], y[:,NewAxis]), axis=1)
2184    assert len(vertex_points.shape) == 2
2185
2186    grid_points = zeros ((ncols*nrows, 2), Float)
2187
2188    for i in xrange(nrows):
2189        if format.lower() == 'asc':
2190            yg = i * cellsize
        else:
            # This will flip the order of the y values for ers
            yg = (nrows-i) * cellsize
2194
2195        for j in xrange(ncols):
2196            xg = j * cellsize
2197            k = i*ncols + j
2198
2199            grid_points[k, 0] = xg
2200            grid_points[k, 1] = yg
2201
2202    #Interpolate
2203    from anuga.fit_interpolate.interpolate import Interpolate
2204
2205    # Remove loners from vertex_points, volumes here
2206    vertex_points, volumes = remove_lone_verts(vertex_points, volumes)
2209    interp = Interpolate(vertex_points, volumes, verbose = verbose)
2210
2211    #Interpolate using quantity values
2212    if verbose: print 'Interpolating'
2213    grid_values = interp.interpolate(q, grid_points).flat
2214
2215    if verbose:
2216        print 'Interpolated values are in [%f, %f]' %(min(grid_values),
2217                                                      max(grid_values))
2218
    # Assign NODATA_value to all points outside bounding polygon
    # (from interpolation mesh)
2220    P = interp.mesh.get_boundary_polygon()
2221    outside_indices = outside_polygon(grid_points, P, closed=True)
2222
2223    for i in outside_indices:
2224        grid_values[i] = NODATA_value
2225
2226    if format.lower() == 'ers':
2227        # setup ERS header information
2228        grid_values = reshape(grid_values, (nrows, ncols))
2229        header = {}
2230        header['datum'] = '"' + datum + '"'
2231        # FIXME The use of hardwired UTM and zone number needs to be made optional
2232        # FIXME Also need an automatic test for coordinate type (i.e. EN or LL)
2233        header['projection'] = '"UTM-' + str(zone) + '"'
2234        header['coordinatetype'] = 'EN'
2235        if header['coordinatetype'] == 'LL':
2236            header['longitude'] = str(newxllcorner)
2237            header['latitude'] = str(newyllcorner)
2238        elif header['coordinatetype'] == 'EN':
2239            header['eastings'] = str(newxllcorner)
2240            header['northings'] = str(newyllcorner)
2241        header['nullcellvalue'] = str(NODATA_value)
2242        header['xdimension'] = str(cellsize)
2243        header['ydimension'] = str(cellsize)
2244        header['value'] = '"' + quantity + '"'
2245        #header['celltype'] = 'IEEE8ByteReal'  #FIXME: Breaks unit test
2246
2247        #Write
2248        if verbose: print 'Writing %s' %demfile
2249
2250        import ermapper_grids
2251
2252        ermapper_grids.write_ermapper_grid(demfile, grid_values, header)
2253
2254        fid.close()
2255    else:
2256        #Write to Ascii format
2257        #Write prj file
2258        prjfile = basename_out + '.prj'
2259
2260        if verbose: print 'Writing %s' %prjfile
2261        prjid = open(prjfile, 'w')
2262        prjid.write('Projection    %s\n' %'UTM')
2263        prjid.write('Zone          %d\n' %zone)
2264        prjid.write('Datum         %s\n' %datum)
2265        prjid.write('Zunits        NO\n')
2266        prjid.write('Units         METERS\n')
2267        prjid.write('Spheroid      %s\n' %datum)
2268        prjid.write('Xshift        %d\n' %false_easting)
2269        prjid.write('Yshift        %d\n' %false_northing)
2270        prjid.write('Parameters\n')
2271        prjid.close()
2272
2273        if verbose: print 'Writing %s' %demfile
2274
2275        ascid = open(demfile, 'w')
2276
2277        ascid.write('ncols         %d\n' %ncols)
2278        ascid.write('nrows         %d\n' %nrows)
2279        ascid.write('xllcorner     %d\n' %newxllcorner)
2280        ascid.write('yllcorner     %d\n' %newyllcorner)
2281        ascid.write('cellsize      %f\n' %cellsize)
2282        ascid.write('NODATA_value  %d\n' %NODATA_value)
2283
2287        for i in range(nrows):
2288            if verbose and i % ((nrows+10)/10) == 0:
2289                print 'Doing row %d of %d' %(i, nrows)
2290
2291            base_index = (nrows-i-1)*ncols
2292
2293            slice = grid_values[base_index:base_index+ncols]
2295            s = array2string(slice, max_line_width=sys.maxint,
2296                             precision=number_of_decimal_places)
2297            ascid.write(s[1:-1] + '\n')
2298
2299        #Close
2300        ascid.close()
2301        fid.close()
2302
    return basename_out
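

# Example use of sww2dem (filenames illustrative): store the maximum stage
# reached over all timesteps in runup.sww as an ArcView ASCII grid with two
# decimal places:
#
#   sww2dem('runup', basename_out='runup_max_stage',
#           quantity='stage', reduction=max, cellsize=10,
#           number_of_decimal_places=2, format='asc')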
2304
2305################################################################################
# Obsolete functions - maintained for backwards compatibility
2307################################################################################
2308
2309##
2310# @brief
2311# @param basename_in
2312# @param basename_out
2313# @param quantity
2314# @param timestep
2315# @param reduction
# @param cellsize
# @param number_of_decimal_places
2317# @param verbose
2318# @param origin
2319# @note OBSOLETE - use sww2dem() instead.
def sww2asc(basename_in, basename_out = None,
            quantity = None,
            timestep = None,
            reduction = None,
            cellsize = 10,
            number_of_decimal_places = None,
            verbose = False,
            origin = None):
    print 'sww2asc will soon be obsoleted - please use sww2dem'
    sww2dem(basename_in,
            basename_out = basename_out,
            quantity = quantity,
            timestep = timestep,
            reduction = reduction,
            cellsize = cellsize,
            number_of_decimal_places = number_of_decimal_places,
            verbose = verbose,
            origin = origin,
            datum = 'WGS84',
            format = 'asc')
2339
2340
2341##
2342# @brief
2343# @param basename_in
2344# @param basename_out
2345# @param quantity
2346# @param timestep
2347# @param reduction
# @param cellsize
# @param number_of_decimal_places
2349# @param verbose
2350# @param origin
2351# @param datum
2352# @note OBSOLETE - use sww2dem() instead.
def sww2ers(basename_in, basename_out=None,
            quantity=None,
            timestep=None,
            reduction=None,
            cellsize=10,
            number_of_decimal_places=None,
            verbose=False,
            origin=None,
            datum='WGS84'):
    print 'sww2ers will soon be obsoleted - please use sww2dem'
    sww2dem(basename_in,
            basename_out=basename_out,
            quantity=quantity,
            timestep=timestep,
            reduction=reduction,
            cellsize=cellsize,
            number_of_decimal_places=number_of_decimal_places,
            verbose=verbose,
            origin=origin,
            datum=datum,
            format='ers')
2373
2374################################################################################
2375# End of obsolete functions
2376################################################################################
2377
2378
2379##
2380# @brief Convert SWW file to PTS file (at selected points).
2381# @param basename_in Stem name of input SWW file.
2382# @param basename_out Stem name of output file.
2383# @param data_points If given, points where quantity is to be computed.
2384# @param quantity Name (or expression) of existing quantity(s) (def: elevation).
2385# @param timestep If given, output quantity at that timestep.
# @param reduction If given, reduce quantity over all timesteps with this function.
2387# @param NODATA_value The NODATA value (default -9999).
2388# @param verbose True if this function is to be verbose.
2389# @param origin ??
2390def sww2pts(basename_in, basename_out=None,
2391            data_points=None,
2392            quantity=None,
2393            timestep=None,
2394            reduction=None,
2395            NODATA_value=-9999,
2396            verbose=False,
2397            origin=None):
2398    """Read SWW file and convert to interpolated values at selected points
2399
2400    The parameter 'quantity' must be the name of an existing quantity or
2401    an expression involving existing quantities. The default is 'elevation'.
2402
2403    if timestep (an index) is given, output quantity at that timestep.
2404
2405    if reduction is given use that to reduce quantity over all timesteps.
2406
2407    data_points (Nx2 array) give locations of points where quantity is to
2408    be computed.
2409    """
2410
2411    import sys
2412    from Numeric import array, Float, concatenate, NewAxis, zeros, reshape
2413    from Numeric import array2string, sometrue
2414    from anuga.utilities.polygon import inside_polygon, outside_polygon, \
2415             separate_points_by_polygon
2416    from anuga.abstract_2d_finite_volumes.util import \
2417             apply_expression_to_dictionary
2418    from anuga.geospatial_data.geospatial_data import Geospatial_data
2419
2420    if quantity is None:
2421        quantity = 'elevation'
2422
2423    if reduction is None:
2424        reduction = max
2425
2426    if basename_out is None:
2427        basename_out = basename_in + '_%s' % quantity
2428
2429    swwfile = basename_in + '.sww'
2430    ptsfile = basename_out + '.pts'
2431
2432    # Read sww file
2433    if verbose: print 'Reading from %s' % swwfile
2434    from Scientific.IO.NetCDF import NetCDFFile
2435    fid = NetCDFFile(swwfile)
2436
2437    # Get extent and reference
2438    x = fid.variables['x'][:]
2439    y = fid.variables['y'][:]
2440    volumes = fid.variables['volumes'][:]
2441
2442    number_of_timesteps = fid.dimensions['number_of_timesteps']
2443    number_of_points = fid.dimensions['number_of_points']
2444    if origin is None:
2445        # Get geo_reference
2446        # sww files don't have to have a geo_ref
2447        try:
2448            geo_reference = Geo_reference(NetCDFObject=fid)
2449        except AttributeError, e:
2450            geo_reference = Geo_reference() # Default georef object
2451
2452        xllcorner = geo_reference.get_xllcorner()
2453        yllcorner = geo_reference.get_yllcorner()
2454        zone = geo_reference.get_zone()
2455    else:
2456        zone = origin[0]
2457        xllcorner = origin[1]
2458        yllcorner = origin[2]
2459
2460    # FIXME: Refactor using code from file_function.statistics
2461    # Something like print swwstats(swwname)
2462    if verbose:
2463        x = fid.variables['x'][:]
2464        y = fid.variables['y'][:]
2465        times = fid.variables['time'][:]
2466        print '------------------------------------------------'
2467        print 'Statistics of SWW file:'
2468        print '  Name: %s' % swwfile
2469        print '  Reference:'
2470        print '    Lower left corner: [%f, %f]' % (xllcorner, yllcorner)
2471        print '    Start time: %f' % fid.starttime[0]
2472        print '  Extent:'
2473        print '    x [m] in [%f, %f], len(x) == %d' \
2474              % (min(x.flat), max(x.flat), len(x.flat))
2475        print '    y [m] in [%f, %f], len(y) == %d' \
2476              % (min(y.flat), max(y.flat), len(y.flat))
2477        print '    t [s] in [%f, %f], len(t) == %d' \
2478              % (min(times), max(times), len(times))
2479        print '  Quantities [SI units]:'
2480        for name in ['stage', 'xmomentum', 'ymomentum', 'elevation']:
2481            q = fid.variables[name][:].flat
2482            print '    %s in [%f, %f]' % (name, min(q), max(q))
2483
2484    # Get quantity and reduce if applicable
2485    if verbose: print 'Processing quantity %s' % quantity
2486
2487    # Turn NetCDF objects into Numeric arrays
2488    quantity_dict = {}
2489    for name in fid.variables.keys():
2490        quantity_dict[name] = fid.variables[name][:]
2491
2492    # Convert quantity expression to quantities found in sww file
2493    q = apply_expression_to_dictionary(quantity, quantity_dict)
2494
2495    if len(q.shape) == 2:
2496        # q has a time component and needs to be reduced along
2497        # the temporal dimension
2498        if verbose: print 'Reducing quantity %s' % quantity
2499
2500        q_reduced = zeros(number_of_points, Float)
2501        for k in range(number_of_points):
2502            q_reduced[k] = reduction(q[:,k])
2503        q = q_reduced
2504
2505    # Post condition: Now q has dimension: number_of_points
2506    assert len(q.shape) == 1
2507    assert q.shape[0] == number_of_points
2508
2509    if verbose:
2510        print 'Processed values for %s are in [%f, %f]' \
2511              % (quantity, min(q), max(q))
2512
2513    # Create grid and update xll/yll corner and x,y
2514    vertex_points = concatenate((x[:, NewAxis], y[:, NewAxis]), axis=1)
2515    assert len(vertex_points.shape) == 2
2516
2517    # Interpolate
2518    from anuga.fit_interpolate.interpolate import Interpolate
2519    interp = Interpolate(vertex_points, volumes, verbose=verbose)
2520
2521    # Interpolate using quantity values
2522    if verbose: print 'Interpolating'
2523    interpolated_values = interp.interpolate(q, data_points).flat
2524
2525    if verbose:
2526        print 'Interpolated values are in [%f, %f]' % (min(interpolated_values),
2527                                                       max(interpolated_values))
2528
2529    # Assign NODATA_value to all points outside bounding polygon
2530    # (from interpolation mesh)
2531    P = interp.mesh.get_boundary_polygon()
2532    outside_indices = outside_polygon(data_points, P, closed=True)
2533
2534    for i in outside_indices:
2535        interpolated_values[i] = NODATA_value
2536
2537    # Store results
2538    G = Geospatial_data(data_points=data_points, attributes=interpolated_values)
2539
2540    G.export_points_file(ptsfile, absolute = True)
2541
2542    fid.close()
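

# Example use of sww2pts (coordinates illustrative): sample the maximum
# stage at two gauge locations given as absolute UTM coordinates:
#
#   sww2pts('runup', quantity='stage', reduction=max,
#           data_points=[[306000.0, 6193000.0], [306500.0, 6193500.0]])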
2543
2544
2545##
2546# @brief Convert ASC file to DEM file.
2547# @param basename_in Stem of input filename.
2548# @param basename_out Stem of output filename.
2549# @param use_cache ??
2550# @param verbose True if this function is to be verbose.
2551# @return
2552# @note A PRJ file with same stem basename must exist and is used to fix the
2553#       UTM zone, datum, false northings and eastings.
2554def convert_dem_from_ascii2netcdf(basename_in, basename_out=None,
2555                                  use_cache=False,
2556                                  verbose=False):
2557    """Read Digital Elevation model from the following ASCII format (.asc)
2558
2559    Example:
2560    ncols         3121
2561    nrows         1800
2562    xllcorner     722000
2563    yllcorner     5893000
2564    cellsize      25
2565    NODATA_value  -9999
2566    138.3698 137.4194 136.5062 135.5558 ..........
2567
2568    Convert basename_in + '.asc' to NetCDF format (.dem)
2569    mimicking the ASCII format closely.
2570
2571    An accompanying file with same basename_in but extension .prj must exist
2572    and is used to fix the UTM zone, datum, false northings and eastings.
2573
2574    The prj format is assumed to be as
2575
2576    Projection    UTM
2577    Zone          56
2578    Datum         WGS84
2579    Zunits        NO
2580    Units         METERS
2581    Spheroid      WGS84
2582    Xshift        0.0000000000
2583    Yshift        10000000.0000000000
2584    Parameters
2585    """
2586
2587    kwargs = {'basename_out': basename_out, 'verbose': verbose}
2588
2589    if use_cache is True:
2590        from caching import cache
2591        result = cache(_convert_dem_from_ascii2netcdf, basename_in, kwargs,
2592                       dependencies=[basename_in + '.asc',
2593                                     basename_in + '.prj'],
2594                       verbose=verbose)
2595
2596    else:
2597        result = apply(_convert_dem_from_ascii2netcdf, [basename_in], kwargs)
2598
2599    return result
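

# Example use (filenames illustrative): convert topo.asc (with matching
# topo.prj) to topo.dem, caching the result for repeated calls:
#
#   convert_dem_from_ascii2netcdf('topo', use_cache=True, verbose=True)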
2600
2601
2602##
2603# @brief Convert an ASC file to a DEM file.
2604# @param basename_in Stem of input filename.
2605# @param basename_out Stem of output filename.
2606# @param verbose True if this function is to be verbose.
2607def _convert_dem_from_ascii2netcdf(basename_in, basename_out = None,
2608                                  verbose = False):
2609    """Read Digital Elevation model from the following ASCII format (.asc)
2610
2611    Internal function. See public function convert_dem_from_ascii2netcdf
2612    for details.
2613    """
2614
2615    import os
2616    from Scientific.IO.NetCDF import NetCDFFile
2617    from Numeric import Float, array
2618
2619    root = basename_in
2620
2621    # Read Meta data
2622    if verbose: print 'Reading METADATA from %s' % root + '.prj'
2623
2624    metadatafile = open(root + '.prj')
2625    metalines = metadatafile.readlines()
2626    metadatafile.close()
2627
2628    L = metalines[0].strip().split()
2629    assert L[0].strip().lower() == 'projection'
2630    projection = L[1].strip()                   #TEXT
2631
2632    L = metalines[1].strip().split()
2633    assert L[0].strip().lower() == 'zone'
2634    zone = int(L[1].strip())
2635
2636    L = metalines[2].strip().split()
2637    assert L[0].strip().lower() == 'datum'
2638    datum = L[1].strip()                        #TEXT
2639
2640    L = metalines[3].strip().split()
2641    assert L[0].strip().lower() == 'zunits'     #IGNORE
2642    zunits = L[1].strip()                       #TEXT
2643
2644    L = metalines[4].strip().split()
2645    assert L[0].strip().lower() == 'units'
2646    units = L[1].strip()                        #TEXT
2647
2648    L = metalines[5].strip().split()
2649    assert L[0].strip().lower() == 'spheroid'   #IGNORE
2650    spheroid = L[1].strip()                     #TEXT
2651
2652    L = metalines[6].strip().split()
2653    assert L[0].strip().lower() == 'xshift'
2654    false_easting = float(L[1].strip())
2655
2656    L = metalines[7].strip().split()
2657    assert L[0].strip().lower() == 'yshift'
2658    false_northing = float(L[1].strip())
2659
2660    #Read DEM data
2661    datafile = open(basename_in + '.asc')
2662
2663    if verbose: print 'Reading DEM from %s' % basename_in + '.asc'
2664
2665    lines = datafile.readlines()
2666    datafile.close()
2667
    if verbose: print 'Got %d lines' % len(lines)
2669
2670    ncols = int(lines[0].split()[1].strip())
2671    nrows = int(lines[1].split()[1].strip())
2672
2673    # Do cellsize (line 4) before line 2 and 3
2674    cellsize = float(lines[4].split()[1].strip())
2675
2676    # Checks suggested by Joaquim Luis
2677    # Our internal representation of xllcorner
2678    # and yllcorner is non-standard.
2679    xref = lines[2].split()
2680    if xref[0].strip() == 'xllcorner':
2681        xllcorner = float(xref[1].strip()) # + 0.5*cellsize # Correct offset
2682    elif xref[0].strip() == 'xllcenter':
2683        xllcorner = float(xref[1].strip())
2684    else:
2685        msg = 'Unknown keyword: %s' % xref[0].strip()
2686        raise Exception, msg
2687
2688    yref = lines[3].split()
2689    if yref[0].strip() == 'yllcorner':
2690        yllcorner = float(yref[1].strip()) # + 0.5*cellsize # Correct offset
2691    elif yref[0].strip() == 'yllcenter':
2692        yllcorner = float(yref[1].strip())
2693    else:
2694        msg = 'Unknown keyword: %s' % yref[0].strip()
2695        raise Exception, msg
2696
2697    NODATA_value = int(lines[5].split()[1].strip())
2698
2699    assert len(lines) == nrows + 6
2700
    if basename_out is None:
2702        netcdfname = root + '.dem'
2703    else:
2704        netcdfname = basename_out + '.dem'
2705
2706    if verbose: print 'Store to NetCDF file %s' % netcdfname
2707
2708    # NetCDF file definition
2709    fid = NetCDFFile(netcdfname, netcdf_mode_w)
2710
2711    #Create new file
2712    fid.institution = 'Geoscience Australia'
2713    fid.description = 'NetCDF DEM format for compact and portable storage ' \
2714                      'of spatial point data'
2715
2716    fid.ncols = ncols
2717    fid.nrows = nrows
2718    fid.xllcorner = xllcorner
2719    fid.yllcorner = yllcorner
2720    fid.cellsize = cellsize
2721    fid.NODATA_value = NODATA_value
2722
2723    fid.zone = zone
2724    fid.false_easting = false_easting
2725    fid.false_northing = false_northing
2726    fid.projection = projection
2727    fid.datum = datum
2728    fid.units = units
2729
2730    # dimension definitions
2731    fid.createDimension('number_of_rows', nrows)
2732    fid.createDimension('number_of_columns', ncols)
2733
2734    # variable definitions
2735    fid.createVariable('elevation', Float, ('number_of_rows',
2736                                            'number_of_columns'))
2737
2738    # Get handles to the variables
2739    elevation = fid.variables['elevation']
2740
2741    #Store data
2742    n = len(lines[6:])
2743    for i, line in enumerate(lines[6:]):
2744        fields = line.split()
2745        if verbose and i % ((n+10)/10) == 0:
2746            print 'Processing row %d of %d' % (i, nrows)
2747        elevation[i, :] = array([float(x) for x in fields])
2748
2749    fid.close()
2750
2751
2752##
2753# @brief Convert 'ferret' file to SWW file.
2754# @param basename_in Stem of input filename.
2755# @param basename_out Stem of output filename.
2756# @param verbose True if this function is to be verbose.
2757# @param minlat
2758# @param maxlat
2759# @param minlon
2760# @param maxlon
2761# @param mint
2762# @param maxt
2763# @param mean_stage
2764# @param origin
2765# @param zscale
2766# @param fail_on_NaN
2767# @param NaN_filler
2768# @param elevation
2769# @param inverted_bathymetry
2770def ferret2sww(basename_in, basename_out=None,
2771               verbose=False,
2772               minlat=None, maxlat=None,
2773               minlon=None, maxlon=None,
2774               mint=None, maxt=None, mean_stage=0,
2775               origin=None, zscale=1,
2776               fail_on_NaN=True,
2777               NaN_filler=0,
2778               elevation=None,
2779               inverted_bathymetry=True
               ):  # FIXME: Bathymetry should be obtained from MOST somehow.
                   # Alternatively from elsewhere or, as a last resort,
                   # specified here. The value of -100 will work for the
                   # Wollongong tsunami scenario but is very hacky.
2788    """Convert MOST and 'Ferret' NetCDF format for wave propagation to
2789    sww format native to abstract_2d_finite_volumes.
2790
2791    Specify only basename_in and read files of the form
2792    basefilename_ha.nc, basefilename_ua.nc, basefilename_va.nc containing
2793    relative height, x-velocity and y-velocity, respectively.
2794
2795    Also convert latitude and longitude to UTM. All coordinates are
2796    assumed to be given in the GDA94 datum.
2797
    min's and max's: If omitted, the full extent is used.
    To include a value min may equal it, while max must exceed it.
    Lat and lon are assumed to be in decimal degrees.
2801
2802    origin is a 3-tuple with geo referenced
2803    UTM coordinates (zone, easting, northing)
2804
2805    nc format has values organised as HA[TIME, LATITUDE, LONGITUDE]
2806    which means that longitude is the fastest
2807    varying dimension (row major order, so to speak)
2808
2809    ferret2sww uses grid points as vertices in a triangular grid
2810    counting vertices from lower left corner upwards, then right
2811    """
2812
2813    import os
2814    from Scientific.IO.NetCDF import NetCDFFile
2815    from Numeric import Float, Int, Int32, searchsorted, zeros, array
2816    from Numeric import allclose, around
2817
2818    precision = Float
2819
2820    msg = 'Must use latitudes and longitudes for minlat, maxlon etc'
2821
    if minlat is not None:
        assert -90 < minlat < 90, msg
    if maxlat is not None:
        assert -90 < maxlat < 90, msg
        if minlat is not None:
            assert maxlat > minlat
    if minlon is not None:
        assert -180 < minlon < 180, msg
    if maxlon is not None:
        assert -180 < maxlon < 180, msg
        if minlon is not None:
            assert maxlon > minlon
2834
2835    # Get NetCDF data
2836    if verbose: print 'Reading files %s_*.nc' % basename_in
2837
2838    file_h = NetCDFFile(basename_in + '_ha.nc', netcdf_mode_r) # Wave amplitude (cm)
2839    file_u = NetCDFFile(basename_in + '_ua.nc', netcdf_mode_r) # Velocity (x) (cm/s)
2840    file_v = NetCDFFile(basename_in + '_va.nc', netcdf_mode_r) # Velocity (y) (cm/s)
2841    file_e = NetCDFFile(basename_in + '_e.nc', netcdf_mode_r)  # Elevation (z) (m)
2842
2843    if basename_out is None:
2844        swwname = basename_in + '.sww'
2845    else:
2846        swwname = basename_out + '.sww'
2847
2848    # Get dimensions of file_h
2849    for dimension in file_h.dimensions.keys():
2850        if dimension[:3] == 'LON':
2851            dim_h_longitude = dimension
2852        if dimension[:3] == 'LAT':
2853            dim_h_latitude = dimension
2854        if dimension[:4] == 'TIME':
2855            dim_h_time = dimension
2856
2857    times = file_h.variables[dim_h_time]
2858    latitudes = file_h.variables[dim_h_latitude]
2859    longitudes = file_h.variables[dim_h_longitude]
2860
2865    # get dimensions for file_e
2866    for dimension in file_e.dimensions.keys():
2867        if dimension[:3] == 'LON':
2868            dim_e_longitude = dimension
2869        if dimension[:3] == 'LAT':
2870            dim_e_latitude = dimension
2871
2872    # get dimensions for file_u
2873    for dimension in file_u.dimensions.keys():
2874        if dimension[:3] == 'LON':
2875            dim_u_longitude = dimension
2876        if dimension[:3] == 'LAT':
2877            dim_u_latitude = dimension
2878        if dimension[:4] == 'TIME':
2879            dim_u_time = dimension
2880
2881    # get dimensions for file_v
2882    for dimension in file_v.dimensions.keys():
2883        if dimension[:3] == 'LON':
2884            dim_v_longitude = dimension
2885        if dimension[:3] == 'LAT':
2886            dim_v_latitude = dimension
2887        if dimension[:4] == 'TIME':
2888            dim_v_time = dimension
2889
    # Precision used by MOST for lat/lon is 4 or 5 decimals
2891    e_lat = around(file_e.variables[dim_e_latitude][:], 5)
2892    e_lon = around(file_e.variables[dim_e_longitude][:], 5)
2893
2894    # Check that files are compatible
2895    assert allclose(latitudes, file_u.variables[dim_u_latitude])
2896    assert allclose(latitudes, file_v.variables[dim_v_latitude])
2897    assert allclose(latitudes, e_lat)
2898
2899    assert allclose(longitudes, file_u.variables[dim_u_longitude])
2900    assert allclose(longitudes, file_v.variables[dim_v_longitude])
2901    assert allclose(longitudes, e_lon)
2902
2903    if mint is None:
2904        jmin = 0
2905        mint = times[0]
2906    else:
2907        jmin = searchsorted(times, mint)
2908
2909    if maxt is None:
2910        jmax = len(times)
2911        maxt = times[-1]
2912    else:
2913        jmax = searchsorted(times, maxt)
2914
2915    kmin, kmax, lmin, lmax = _get_min_max_indexes(latitudes[:],
2916                                                  longitudes[:],
2917                                                  minlat, maxlat,
2918                                                  minlon, maxlon)
2919
2920
2921    times = times[jmin:jmax]
2922    latitudes = latitudes[kmin:kmax]
2923    longitudes = longitudes[lmin:lmax]
2924
2925    if verbose: print 'cropping'
2926
2927    zname = 'ELEVATION'
2928
2929    amplitudes = file_h.variables['HA'][jmin:jmax, kmin:kmax, lmin:lmax]
2930    uspeed = file_u.variables['UA'][jmin:jmax, kmin:kmax, lmin:lmax] #Lon
2931    vspeed = file_v.variables['VA'][jmin:jmax, kmin:kmax, lmin:lmax] #Lat
2932    elevations = file_e.variables[zname][kmin:kmax, lmin:lmax]
2933
2948    #Get missing values
2949    nan_ha = file_h.variables['HA'].missing_value[0]
2950    nan_ua = file_u.variables['UA'].missing_value[0]
2951    nan_va = file_v.variables['VA'].missing_value[0]
2952    if hasattr(file_e.variables[zname],'missing_value'):
2953        nan_e  = file_e.variables[zname].missing_value[0]
2954    else:
2955        nan_e = None
2956
2957    #Cleanup
2958    from Numeric import sometrue
2959
2960    missing = (amplitudes == nan_ha)
2961    if sometrue (missing):
2962        if fail_on_NaN:
            msg = 'NetCDFFile %s contains missing values' \
                  % (basename_in + '_ha.nc')
2965            raise DataMissingValuesError, msg
2966        else:
2967            amplitudes = amplitudes*(missing==0) + missing*NaN_filler
2968
2969    missing = (uspeed == nan_ua)
2970    if sometrue (missing):
2971        if fail_on_NaN:
            msg = 'NetCDFFile %s contains missing values' \
                  % (basename_in + '_ua.nc')
2974            raise DataMissingValuesError, msg
2975        else:
2976            uspeed = uspeed*(missing==0) + missing*NaN_filler
2977
2978    missing = (vspeed == nan_va)
2979    if sometrue (missing):
2980        if fail_on_NaN:
            msg = 'NetCDFFile %s contains missing values' \
                  % (basename_in + '_va.nc')
2983            raise DataMissingValuesError, msg
2984        else:
2985            vspeed = vspeed*(missing==0) + missing*NaN_filler
2986
2987    missing = (elevations == nan_e)
2988    if sometrue (missing):
2989        if fail_on_NaN:
            msg = 'NetCDFFile %s contains missing values' \
                  % (basename_in + '_e.nc')
2992            raise DataMissingValuesError, msg
2993        else:
2994            elevations = elevations*(missing==0) + missing*NaN_filler
2995
2996    number_of_times = times.shape[0]
2997    number_of_latitudes = latitudes.shape[0]
2998    number_of_longitudes = longitudes.shape[0]
2999
3000    assert amplitudes.shape[0] == number_of_times
3001    assert amplitudes.shape[1] == number_of_latitudes
3002    assert amplitudes.shape[2] == number_of_longitudes
3003
3004    if verbose:
3005        print '------------------------------------------------'
3006        print 'Statistics:'
3007        print '  Extent (lat/lon):'
3008        print '    lat in [%f, %f], len(lat) == %d' \
3009              % (min(latitudes.flat), max(latitudes.flat), len(latitudes.flat))
3010        print '    lon in [%f, %f], len(lon) == %d' \
3011              % (min(longitudes.flat), max(longitudes.flat),
3012                 len(longitudes.flat))
3013        print '    t in [%f, %f], len(t) == %d' \
3014              % (min(times.flat), max(times.flat), len(times.flat))
3015
3016        q = amplitudes.flat
3017        name = 'Amplitudes (ha) [cm]'
        print '    %s in [%f, %f]' % (name, min(q), max(q))
3019
3020        q = uspeed.flat
3021        name = 'Speeds (ua) [cm/s]'
        print '    %s in [%f, %f]' % (name, min(q), max(q))
3023
3024        q = vspeed.flat
3025        name = 'Speeds (va) [cm/s]'
        print '    %s in [%f, %f]' % (name, min(q), max(q))
3027
3028        q = elevations.flat
3029        name = 'Elevations (e) [m]'
        print '    %s in [%f, %f]' % (name, min(q), max(q))
3031
3033    number_of_points = number_of_latitudes * number_of_longitudes
3034    number_of_volumes = (number_of_latitudes-1) * (number_of_longitudes-1) * 2
3035
3036    file_h.close()
3037    file_u.close()
3038    file_v.close()
3039    file_e.close()
3040
3041    # NetCDF file definition
3042    outfile = NetCDFFile(swwname, netcdf_mode_w)
3043
3044    description = 'Converted from Ferret files: %s, %s, %s, %s' \
3045                  % (basename_in + '_ha.nc',
3046                     basename_in + '_ua.nc',
3047                     basename_in + '_va.nc',
3048                     basename_in + '_e.nc')
3049
3050    # Create new file
3051    starttime = times[0]
3052
3053    sww = Write_sww()
3054    sww.store_header(outfile, times, number_of_volumes,
3055                     number_of_points, description=description,
3056                     verbose=verbose, sww_precision=Float)
3057
3058    # Store
3059    from anuga.coordinate_transforms.redfearn import redfearn
3060    x = zeros(number_of_points, Float)  #Easting
3061    y = zeros(number_of_points, Float)  #Northing
3062
3063    if verbose: print 'Making triangular grid'
3064
3065    # Check zone boundaries
3066    refzone, _, _ = redfearn(latitudes[0], longitudes[0])
3067
3068    vertices = {}
3069    i = 0
3070    for k, lat in enumerate(latitudes):       # Y direction
3071        for l, lon in enumerate(longitudes):  # X direction
3072            vertices[l,k] = i
3073
3074            zone, easting, northing = redfearn(lat,lon)
3075
3076            #msg = 'Zone boundary crossed at longitude =', lon
3077            #assert zone == refzone, msg
3079            x[i] = easting
3080            y[i] = northing
3081            i += 1
3082
3083    #Construct 2 triangles per 'rectangular' element
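    # Each cell is split along the v2-v3 diagonal (illustrative sketch,
    # with k/latitude increasing upwards and l/longitude to the right):
    #
    #   v1 ---- v3
    #    |    / |
    #    |   /  |
    #   v2 ---- v4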
3084    volumes = []
3085    for l in range(number_of_longitudes-1):    # X direction
3086        for k in range(number_of_latitudes-1): # Y direction
3087            v1 = vertices[l,k+1]
3088            v2 = vertices[l,k]
3089            v3 = vertices[l+1,k+1]
3090            v4 = vertices[l+1,k]
3091
3092            volumes.append([v1,v2,v3]) #Upper element
3093            volumes.append([v4,v3,v2]) #Lower element
3094
3095    volumes = array(volumes)
3096
3097    if origin is None:
3098        origin = Geo_reference(refzone, min(x), min(y))
3099    geo_ref = write_NetCDF_georeference(origin, outfile)
3100
3101    if elevation is not None:
3102        z = elevation
3103    else:
3104        if inverted_bathymetry:
3105            z = -1 * elevations
3106        else:
3107            z = elevations
3108    #FIXME: z should be obtained from MOST and passed in here
3109
3110    #FIXME use the Write_sww instance(sww) to write this info
3111    from Numeric import resize
3112    z = resize(z, outfile.variables['z'][:].shape)
3113    outfile.variables['x'][:] = x - geo_ref.get_xllcorner()
3114    outfile.variables['y'][:] = y - geo_ref.get_yllcorner()
    outfile.variables['z'][:] = z             #FIXME HACK for backwards compat.
3116    outfile.variables['elevation'][:] = z
3117    outfile.variables['volumes'][:] = volumes.astype(Int32) #For Opteron 64
3118
3119    #Time stepping
3120    stage = outfile.variables['stage']
3121    xmomentum = outfile.variables['xmomentum']
3122    ymomentum = outfile.variables['ymomentum']
3123
3124    if verbose: print 'Converting quantities'
3125
3126    n = len(times)
3127    for j in range(n):
3128        if verbose and j % ((n+10)/10) == 0: print '  Doing %d of %d' %(j, n)
3129
3130        i = 0
3131        for k in range(number_of_latitudes):      # Y direction
3132            for l in range(number_of_longitudes): # X direction
3133                w = zscale * amplitudes[j,k,l] / 100 + mean_stage
3134                stage[j,i] = w
3135                h = w - z[i]
3136                xmomentum[j,i] = uspeed[j,k,l]/100*h
3137                ymomentum[j,i] = vspeed[j,k,l]/100*h
3138                i += 1
3139
3140    #outfile.close()
3141
3142    #FIXME: Refactor using code from file_function.statistics
3143    #Something like print swwstats(swwname)
3144    if verbose:
3145        x = outfile.variables['x'][:]
3146        y = outfile.variables['y'][:]
3147        print '------------------------------------------------'
3148        print 'Statistics of output file:'
3149        print '  Name: %s' %swwname
3150        print '  Reference:'
3151        print '    Lower left corner: [%f, %f]' \
3152              % (geo_ref.get_xllcorner(), geo_ref.get_yllcorner())
        print '    Start time: %f' % starttime
        print '    Min time: %f' % mint
        print '    Max time: %f' % maxt
3156        print '  Extent:'
3157        print '    x [m] in [%f, %f], len(x) == %d' \
3158              % (min(x.flat), max(x.flat), len(x.flat))
3159        print '    y [m] in [%f, %f], len(y) == %d' \
3160              % (min(y.flat), max(y.flat), len(y.flat))
3161        print '    t [s] in [%f, %f], len(t) == %d' \
3162              % (min(times), max(times), len(times))
3163        print '  Quantities [SI units]:'
3164        for name in ['stage', 'xmomentum', 'ymomentum', 'elevation']:
3165            q = outfile.variables[name][:].flat
3166            print '    %s in [%f, %f]' % (name, min(q), max(q))
3167
3168    outfile.close()
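
# A usage sketch for the Ferret conversion above (ferret2sww); the basename
# 'ocean' is hypothetical:
#
#     # Reads ocean_ha.nc, ocean_ua.nc, ocean_va.nc and ocean_e.nc and
#     # writes the corresponding sww file:
#     ferret2sww('ocean', verbose=True, inverted_bathymetry=True)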
3169
3170
3171##
3172# @brief Convert time-series text file to TMS file.
3173# @param filename
3174# @param quantity_names
3175# @param time_as_seconds
3176def timefile2netcdf(filename, quantity_names=None, time_as_seconds=False):
3177    """Template for converting typical text files with time series to
3178    NetCDF tms file.
3179
3180    The file format is assumed to be either two fields separated by a comma:
3181
3182        time [DD/MM/YY hh:mm:ss], value0 value1 value2 ...
3183
    E.g.
3185
3186      31/08/04 00:00:00, 1.328223 0 0
3187      31/08/04 00:15:00, 1.292912 0 0
3188
3189    or time (seconds), value0 value1 value2 ...
3190
3191      0.0, 1.328223 0 0
3192      0.1, 1.292912 0 0
3193
3194    will provide a time dependent function f(t) with three attributes
3195
    filename is assumed to be the rootname with extensions .txt and .tms
3197    """
3198
3199    import time, calendar
3200    from Numeric import array
3201    from anuga.config import time_format
3202    from anuga.utilities.numerical_tools import ensure_numeric
3203
3204    file_text = filename + '.txt'
3205    fid = open(file_text)
3206    line = fid.readline()
3207    fid.close()
3208
3209    fields = line.split(',')
3210    msg = "File %s must have the format 'datetime, value0 value1 value2 ...'" \
3211          % file_text
3212    assert len(fields) == 2, msg
3213
3214    if not time_as_seconds:
3215        try:
3216            starttime = calendar.timegm(time.strptime(fields[0], time_format))
3217        except ValueError:
3218            msg = 'First field in file %s must be' % file_text
3219            msg += ' date-time with format %s.\n' % time_format
3220            msg += 'I got %s instead.' % fields[0]
3221            raise DataTimeError, msg
3222    else:
3223        try:
3224            starttime = float(fields[0])
        except ValueError:
3226            msg = "Bad time format"
3227            raise DataTimeError, msg
3228
3229    # Split values
3230    values = []
3231    for value in fields[1].split():
3232        values.append(float(value))
3233
3234    q = ensure_numeric(values)
3235
3236    msg = 'ERROR: File must contain at least one independent value'
3237    assert len(q.shape) == 1, msg
3238
3239    # Read times proper
3240    from Numeric import zeros, Float, alltrue
3241    from anuga.config import time_format
3242    import time, calendar
3243
3244    fid = open(file_text)
3245    lines = fid.readlines()
3246    fid.close()
3247
3248    N = len(lines)
3249    d = len(q)
3250
3251    T = zeros(N, Float)       # Time
3252    Q = zeros((N, d), Float)  # Values
3253
3254    for i, line in enumerate(lines):
3255        fields = line.split(',')
3256        if not time_as_seconds:
3257            realtime = calendar.timegm(time.strptime(fields[0], time_format))
3258        else:
            realtime = float(fields[0])
3260        T[i] = realtime - starttime
3261
3262        for j, value in enumerate(fields[1].split()):
3263            Q[i, j] = float(value)
3264
    msg = 'File %s must list time as a monotonically ' % filename
3266    msg += 'increasing sequence'
3267    assert alltrue(T[1:] - T[:-1] > 0), msg
3268
3269    #Create NetCDF file
3270    from Scientific.IO.NetCDF import NetCDFFile
3271
3272    fid = NetCDFFile(filename + '.tms', netcdf_mode_w)
3273
3274    fid.institution = 'Geoscience Australia'
3275    fid.description = 'Time series'
3276
3277    #Reference point
3278    #Start time in seconds since the epoch (midnight 1/1/1970)
3279    #FIXME: Use Georef
3280    fid.starttime = starttime
3281
3282    # dimension definitions
3283    #fid.createDimension('number_of_volumes', self.number_of_volumes)
3284    #fid.createDimension('number_of_vertices', 3)
3285
3286    fid.createDimension('number_of_timesteps', len(T))
3287
3288    fid.createVariable('time', Float, ('number_of_timesteps',))
3289
3290    fid.variables['time'][:] = T
3291
3292    for i in range(Q.shape[1]):
3293        try:
3294            name = quantity_names[i]
        except (TypeError, IndexError):
3296            name = 'Attribute%d' % i
3297
3298        fid.createVariable(name, Float, ('number_of_timesteps',))
3299        fid.variables[name][:] = Q[:,i]
3300
3301    fid.close()
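
# A minimal usage sketch for timefile2netcdf; the rootname 'gauge' and the
# quantity names are hypothetical:
#
#     # Given 'gauge.txt' containing lines like
#     #     31/08/04 00:00:00, 1.328223 0 0
#     # this writes 'gauge.tms' with variables 'stage', 'ua' and 'va':
#     timefile2netcdf('gauge', quantity_names=['stage', 'ua', 'va'])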
3302
3303
3304##
3305# @brief Get the extents of a NetCDF data file.
3306# @param file_name The path to the SWW file.
3307# @return A list of x, y, z and stage limits (min, max).
3308def extent_sww(file_name):
3309    """Read in an sww file.
3310
3311    Input:
3312    file_name - the sww file
3313
3314    Output:
3315    A list: [min(x),max(x),min(y),max(y),min(stage.flat),max(stage.flat)]
3316    """
3317
3318    from Scientific.IO.NetCDF import NetCDFFile
3319
3320    #Get NetCDF
3321    fid = NetCDFFile(file_name, netcdf_mode_r)
3322
3323    # Get the variables
3324    x = fid.variables['x'][:]
3325    y = fid.variables['y'][:]
3326    stage = fid.variables['stage'][:]
3327
3328    fid.close()
3329
3330    return [min(x), max(x), min(y), max(y), min(stage.flat), max(stage.flat)]
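
# Usage sketch for extent_sww; the file name 'model.sww' is hypothetical:
#
#     xmin, xmax, ymin, ymax, wmin, wmax = extent_sww('model.sww')
#     print 'x in [%f, %f], stage in [%f, %f]' % (xmin, xmax, wmin, wmax)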
3331
3332
3333##
3334# @brief
3335# @param filename
3336# @param boundary
3337# @param t
3338# @param fail_if_NaN
3339# @param NaN_filler
3340# @param verbose
3341# @param very_verbose
3342# @return
3343def sww2domain(filename, boundary=None, t=None,
3344               fail_if_NaN=True, NaN_filler=0,
3345               verbose=False, very_verbose=False):
3346    """
    Usage: domain = sww2domain('file.sww', t=time (default = last time in file))

    Passing a boundary is not recommended if domain.smooth is not selected,
    as smoothing uses unique coordinates but not unique boundaries.
    This means that the boundary will not be compatible with the coordinates,
    and will give a different final boundary, or crash.
3353    """
3354
3355    from Scientific.IO.NetCDF import NetCDFFile
3356    from shallow_water import Domain
3357    from Numeric import asarray, transpose, resize
3358
3359    # initialise NaN.
3360    NaN = 9.969209968386869e+036
3361
3362    if verbose: print 'Reading from ', filename
3363
3364    fid = NetCDFFile(filename, netcdf_mode_r)    # Open existing file for read
3365    time = fid.variables['time']       # Timesteps
3366    if t is None:
3367        t = time[-1]
3368    time_interp = get_time_interp(time,t)
3369
3370    # Get the variables as Numeric arrays
3371    x = fid.variables['x'][:]                   # x-coordinates of vertices
3372    y = fid.variables['y'][:]                   # y-coordinates of vertices
3373    elevation = fid.variables['elevation']      # Elevation
3374    stage = fid.variables['stage']              # Water level
3375    xmomentum = fid.variables['xmomentum']      # Momentum in the x-direction
3376    ymomentum = fid.variables['ymomentum']      # Momentum in the y-direction
3377
3378    starttime = fid.starttime[0]
3379    volumes = fid.variables['volumes'][:]       # Connectivity
3380    coordinates = transpose(asarray([x.tolist(), y.tolist()]))
3381    # FIXME (Ole): Something like this might be better:
3382    #                 concatenate((x, y), axis=1)
3383    # or              concatenate((x[:,NewAxis], x[:,NewAxis]), axis=1)
3384
3385    conserved_quantities = []
3386    interpolated_quantities = {}
3387    other_quantities = []
3388
3389    # get geo_reference
3390    try:                             # sww files don't have to have a geo_ref
3391        geo_reference = Geo_reference(NetCDFObject=fid)
    except AttributeError:
3393        geo_reference = None
3394
3395    if verbose: print '    getting quantities'
3396
3397    for quantity in fid.variables.keys():
3398        dimensions = fid.variables[quantity].dimensions
3399        if 'number_of_timesteps' in dimensions:
3400            conserved_quantities.append(quantity)
3401            interpolated_quantities[quantity] = \
3402                  interpolated_quantity(fid.variables[quantity][:], time_interp)
3403        else:
3404            other_quantities.append(quantity)
3405
3406    other_quantities.remove('x')
3407    other_quantities.remove('y')
3408    other_quantities.remove('z')
3409    other_quantities.remove('volumes')
3410    try:
3411        other_quantities.remove('stage_range')
3412        other_quantities.remove('xmomentum_range')
3413        other_quantities.remove('ymomentum_range')
3414        other_quantities.remove('elevation_range')
    except ValueError:
        pass                   # Not all sww files have the _range variables
3417
3418    conserved_quantities.remove('time')
3419
3420    if verbose: print '    building domain'
3421
3422    #    From domain.Domain:
3423    #    domain = Domain(coordinates, volumes,\
3424    #                    conserved_quantities = conserved_quantities,\
3425    #                    other_quantities = other_quantities,zone=zone,\
3426    #                    xllcorner=xllcorner, yllcorner=yllcorner)
3427
3428    # From shallow_water.Domain:
3429    coordinates = coordinates.tolist()
3430    volumes = volumes.tolist()
    # FIXME: should this be in mesh? (peter row)
    if fid.smoothing == 'Yes':
        unique = False
    else:
        unique = True
    if unique:
        coordinates, volumes, boundary = weed(coordinates, volumes, boundary)
3438
3439    try:
3440        domain = Domain(coordinates, volumes, boundary)
3441    except AssertionError, e:
3442        fid.close()
3443        msg = 'Domain could not be created: %s. ' \
3444              'Perhaps use "fail_if_NaN=False and NaN_filler = ..."' % e
3445        raise DataDomainError, msg
3446
    if boundary is not None:
3448        domain.boundary = boundary
3449
3450    domain.geo_reference = geo_reference
3451
3452    domain.starttime = float(starttime) + float(t)
3453    domain.time = 0.0
3454
3455    for quantity in other_quantities:
3456        try:
3457            NaN = fid.variables[quantity].missing_value
        except AttributeError:
3459            pass                       # quantity has no missing_value number
3460        X = fid.variables[quantity][:]
3461        if very_verbose:
3462            print '       ', quantity
3463            print '        NaN =', NaN
3464            print '        max(X)'
3465            print '       ', max(X)
3466            print '        max(X)==NaN'
3467            print '       ', max(X)==NaN
3468            print ''
3469        if max(X) == NaN or min(X) == NaN:
3470            if fail_if_NaN:
3471                msg = 'quantity "%s" contains no_data entry' % quantity
3472                raise DataMissingValuesError, msg
3473            else:
3474                data = (X != NaN)
3475                X = (X*data) + (data==0)*NaN_filler
3476        if unique:
3477            X = resize(X, (len(X)/3, 3))
3478        domain.set_quantity(quantity, X)
3479    #
3480    for quantity in conserved_quantities:
3481        try:
3482            NaN = fid.variables[quantity].missing_value
        except AttributeError:
3484            pass                       # quantity has no missing_value number
3485        X = interpolated_quantities[quantity]
3486        if very_verbose:
3487            print '       ',quantity
3488            print '        NaN =', NaN
3489            print '        max(X)'
3490            print '       ', max(X)
3491            print '        max(X)==NaN'
3492            print '       ', max(X)==NaN
3493            print ''
3494        if max(X) == NaN or min(X) == NaN:
3495            if fail_if_NaN:
3496                msg = 'quantity "%s" contains no_data entry' % quantity
3497                raise DataMissingValuesError, msg
3498            else:
3499                data = (X != NaN)
3500                X = (X*data) + (data==0)*NaN_filler
3501        if unique:
3502            X = resize(X, (X.shape[0]/3, 3))
3503        domain.set_quantity(quantity, X)
3504
3505    fid.close()
3506
3507    return domain
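
# Usage sketch for sww2domain; the file name is hypothetical.  This rebuilds
# a domain from the last stored timestep and evolves it further:
#
#     domain = sww2domain('model.sww', fail_if_NaN=False, NaN_filler=0.0)
#     for t in domain.evolve(yieldstep=10, finaltime=100):
#         domain.write_time()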
3508
3509
3510##
3511# @brief Interpolate a quantity wrt time.
3512# @param saved_quantity The quantity to interpolate.
3513# @param time_interp (index, ratio)
3514# @return A vector of interpolated values.
3515def interpolated_quantity(saved_quantity, time_interp):
3516    '''Given an index and ratio, interpolate quantity with respect to time.'''
3517
3518    index, ratio = time_interp
3519
3520    Q = saved_quantity
3521
3522    if ratio > 0:
3523        q = (1-ratio)*Q[index] + ratio*Q[index+1]
3524    else:
3525        q = Q[index]
3526
3527    #Return vector of interpolated values
3528    return q
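
# Worked example for interpolated_quantity: with time_interp == (2, 0.25) the
# result is 0.75*Q[2] + 0.25*Q[3], i.e. linear interpolation between the two
# bracketing timesteps:
#
#     from Numeric import array
#     Q = array([0.0, 10.0, 20.0, 40.0])
#     print interpolated_quantity(Q, (2, 0.25))    # -> 25.0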
3529
3530
3531##
3532# @brief
# @param time
# @param t
# @return An (index, ratio) tuple.
3536def get_time_interp(time, t=None):
3537    #Finds the ratio and index for time interpolation.
3538    #It is borrowed from previous abstract_2d_finite_volumes code.
3539    if t is None:
3540        t=time[-1]
3541        index = -1
3542        ratio = 0.
3543    else:
3544        T = time
3545        tau = t
3546        index=0
3547        msg = 'Time interval derived from file %s [%s:%s]' \
3548              % ('FIXMEfilename', T[0], T[-1])
3549        msg += ' does not match model time: %s' % tau
3550        if tau < time[0]: raise DataTimeError, msg
3551        if tau > time[-1]: raise DataTimeError, msg
3552        while tau > time[index]: index += 1
3553        while tau < time[index]: index -= 1
3554        if tau == time[index]:
3555            #Protect against case where tau == time[-1] (last time)
3556            # - also works in general when tau == time[i]
3557            ratio = 0
3558        else:
3559            #t is now between index and index+1
3560            ratio = (tau - time[index])/(time[index+1] - time[index])
3561
3562    return (index, ratio)
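
# Worked example for get_time_interp (illustrative values):
#
#     from Numeric import array
#     T = array([0.0, 10.0, 20.0, 30.0])
#     print get_time_interp(T, 25.0)   # -> (2, 0.5): halfway between T[2], T[3]
#     print get_time_interp(T)         # -> (-1, 0.): the last stored time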
3563
3564
3565##
3566# @brief
3567# @param coordinates
3568# @param volumes
3569# @param boundary
3570def weed(coordinates, volumes, boundary=None):
3571    if type(coordinates) == ArrayType:
3572        coordinates = coordinates.tolist()
3573    if type(volumes) == ArrayType:
3574        volumes = volumes.tolist()
3575
3576    unique = False
3577    point_dict = {}
3578    same_point = {}
3579    for i in range(len(coordinates)):
3580        point = tuple(coordinates[i])
3581        if point_dict.has_key(point):
3582            unique = True
3583            same_point[i] = point
3584            #to change all point i references to point j
3585        else:
3586            point_dict[point] = i
3587            same_point[i] = point
3588
3589    coordinates = []
3590    i = 0
3591    for point in point_dict.keys():
3592        point = tuple(point)
3593        coordinates.append(list(point))
3594        point_dict[point] = i
3595        i += 1
3596
3597    for volume in volumes:
3598        for i in range(len(volume)):
3599            index = volume[i]
3600            if index > -1:
3601                volume[i] = point_dict[same_point[index]]
3602
3603    new_boundary = {}
    if boundary is not None:
3605        for segment in boundary.keys():
3606            point0 = point_dict[same_point[segment[0]]]
3607            point1 = point_dict[same_point[segment[1]]]
3608            label = boundary[segment]
            #FIXME: should the boundary attributes be concatenated
            #('exterior, pond') or replaced ('pond')? (peter row)

            # If the segment is already present, keep its existing label
            if new_boundary.has_key((point0, point1)):
                new_boundary[(point0, point1)] = new_boundary[(point0, point1)]
            elif new_boundary.has_key((point1, point0)):
                new_boundary[(point1, point0)] = new_boundary[(point1, point0)]
            else:
                new_boundary[(point0, point1)] = label
3618
3619        boundary = new_boundary
3620
3621    return coordinates, volumes, boundary
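
# Usage sketch for weed: duplicate coordinates are merged and the volume
# (and boundary) indices are remapped accordingly:
#
#     coords = [[0,0], [1,0], [0,1], [1,0]]    # point 3 duplicates point 1
#     vols = [[0,1,2], [2,3,0]]
#     coords, vols, _ = weed(coords, vols)
#     # len(coords) == 3; both triangles now reference the same merged point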
3622
3623
3624##
3625# @brief Read DEM file, decimate data, write new DEM file.
3626# @param basename_in Stem of the input filename.
3627# @param stencil
3628# @param cellsize_new New cell size to resample on.
3629# @param basename_out Stem of the output filename.
3630# @param verbose True if this function is to be verbose.
3631def decimate_dem(basename_in, stencil, cellsize_new, basename_out=None,
3632                 verbose=False):
3633    """Read Digitial Elevation model from the following NetCDF format (.dem)
3634
3635    Example:
3636
3637    ncols         3121
3638    nrows         1800
3639    xllcorner     722000
3640    yllcorner     5893000
3641    cellsize      25
3642    NODATA_value  -9999
3643    138.3698 137.4194 136.5062 135.5558 ..........
3644
3645    Decimate data to cellsize_new using stencil and write to NetCDF dem format.
3646    """
3647
3648    import os
3649    from Scientific.IO.NetCDF import NetCDFFile
3650    from Numeric import Float, zeros, sum, reshape, equal
3651
3652    root = basename_in
3653    inname = root + '.dem'
3654
3655    #Open existing netcdf file to read
3656    infile = NetCDFFile(inname, netcdf_mode_r)
3657
3658    if verbose: print 'Reading DEM from %s' % inname
3659
3660    #Read metadata
3661    ncols = infile.ncols[0]
3662    nrows = infile.nrows[0]
3663    xllcorner = infile.xllcorner[0]
3664    yllcorner = infile.yllcorner[0]
3665    cellsize = infile.cellsize[0]
3666    NODATA_value = infile.NODATA_value[0]
3667    zone = infile.zone[0]
3668    false_easting = infile.false_easting[0]
3669    false_northing = infile.false_northing[0]
3670    projection = infile.projection
3671    datum = infile.datum
3672    units = infile.units
3673
3674    dem_elevation = infile.variables['elevation']
3675
3676    #Get output file name
    if basename_out is None:
3678        outname = root + '_' + repr(cellsize_new) + '.dem'
3679    else:
3680        outname = basename_out + '.dem'
3681
3682    if verbose: print 'Write decimated NetCDF file to %s' % outname
3683
3684    #Determine some dimensions for decimated grid
3685    (nrows_stencil, ncols_stencil) = stencil.shape
3686    x_offset = ncols_stencil / 2
3687    y_offset = nrows_stencil / 2
3688    cellsize_ratio = int(cellsize_new / cellsize)
3689    ncols_new = 1 + (ncols - ncols_stencil) / cellsize_ratio
3690    nrows_new = 1 + (nrows - nrows_stencil) / cellsize_ratio
3691
3692    #Open netcdf file for output
3693    outfile = NetCDFFile(outname, netcdf_mode_w)
3694
3695    #Create new file
3696    outfile.institution = 'Geoscience Australia'
3697    outfile.description = 'NetCDF DEM format for compact and portable ' \
3698                          'storage of spatial point data'
3699
3700    #Georeferencing
3701    outfile.zone = zone
3702    outfile.projection = projection
3703    outfile.datum = datum
3704    outfile.units = units
3705
3706    outfile.cellsize = cellsize_new
3707    outfile.NODATA_value = NODATA_value
3708    outfile.false_easting = false_easting
3709    outfile.false_northing = false_northing
3710
3711    outfile.xllcorner = xllcorner + (x_offset * cellsize)
3712    outfile.yllcorner = yllcorner + (y_offset * cellsize)
3713    outfile.ncols = ncols_new
3714    outfile.nrows = nrows_new
3715
3716    # dimension definition
3717    outfile.createDimension('number_of_points', nrows_new*ncols_new)
3718
3719    # variable definition
3720    outfile.createVariable('elevation', Float, ('number_of_points',))
3721
3722    # Get handle to the variable
3723    elevation = outfile.variables['elevation']
3724
3725    dem_elevation_r = reshape(dem_elevation, (nrows, ncols))
3726
3727    #Store data
3728    global_index = 0
3729    for i in range(nrows_new):
3730        if verbose: print 'Processing row %d of %d' %(i, nrows_new)
3731
3732        lower_index = global_index
3733        telev = zeros(ncols_new, Float)
3734        local_index = 0
3735        trow = i * cellsize_ratio
3736
3737        for j in range(ncols_new):
3738            tcol = j * cellsize_ratio
3739            tmp = dem_elevation_r[trow:trow+nrows_stencil,
3740                                  tcol:tcol+ncols_stencil]
3741
3742            #if dem contains 1 or more NODATA_values set value in
3743            #decimated dem to NODATA_value, else compute decimated
3744            #value using stencil
3745            if sum(sum(equal(tmp, NODATA_value))) > 0:
3746                telev[local_index] = NODATA_value
3747            else:
3748                telev[local_index] = sum(sum(tmp * stencil))
3749
3750            global_index += 1
3751            local_index += 1
3752
3753        upper_index = global_index
3754
3755        elevation[lower_index:upper_index] = telev
3756
3757    assert global_index == nrows_new*ncols_new, \
3758           'index not equal to number of points'
3759
3760    infile.close()
3761    outfile.close()
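
# Usage sketch for decimate_dem; the file stem 'topo' is hypothetical.  A
# uniform 4x4 averaging stencil resamples a 25m DEM onto a 100m grid:
#
#     from Numeric import ones, Float
#     stencil = ones((4, 4), Float) / 16.0    # weights sum to 1
#     decimate_dem('topo', stencil, cellsize_new=100)
#     # -> writes 'topo_100.dem'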
3762
3763
3764##
3765# @brief
3766# @param filename
3767# @param verbose
3768def tsh2sww(filename, verbose=False):
3769    """
3770    to check if a tsh/msh file 'looks' good.
3771    """
3772
    if verbose: print 'Creating domain from', filename
3774
3775    domain = pmesh_to_domain_instance(filename, Domain)
3776
3777    if verbose == True:print "Number of triangles = ", len(domain)
3778
3779    domain.smooth = True
3780    domain.format = 'sww'   #Native netcdf visualisation format
3781    file_path, filename = path.split(filename)
3782    filename, ext = path.splitext(filename)
3783    domain.set_name(filename)
3784    domain.reduction = mean
3785
3786    if verbose == True:print "file_path",file_path
3787
3788    if file_path == "":
3789        file_path = "."
3790    domain.set_datadir(file_path)
3791
    if verbose:
3793        print "Output written to " + domain.get_datadir() + sep + \
3794              domain.get_name() + "." + domain.format
3795
3796    sww = get_dataobject(domain)
3797    sww.store_connectivity()
3798    sww.store_timestep()
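
# Usage sketch for tsh2sww; the mesh file name is hypothetical:
#
#     tsh2sww('mesh.tsh', verbose=True)    # writes mesh.sww next to the mesh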
3799
3800
3801##
3802# @brief Convert CSIRO ESRI file to an SWW boundary file.
3803# @param bath_dir
3804# @param elevation_dir
3805# @param ucur_dir
3806# @param vcur_dir
3807# @param sww_file
3808# @param minlat
3809# @param maxlat
3810# @param minlon
3811# @param maxlon
3812# @param zscale
3813# @param mean_stage
3814# @param fail_on_NaN
3815# @param elevation_NaN_filler
3816# @param bath_prefix
3817# @param elevation_prefix
3818# @param verbose
3819# @note Also convert latitude and longitude to UTM. All coordinates are
3820#       assumed to be given in the GDA94 datum.
3821def asc_csiro2sww(bath_dir,
3822                  elevation_dir,
3823                  ucur_dir,
3824                  vcur_dir,
3825                  sww_file,
3826                  minlat=None, maxlat=None,
3827                  minlon=None, maxlon=None,
3828                  zscale=1,
3829                  mean_stage=0,
3830                  fail_on_NaN=True,
3831                  elevation_NaN_filler=0,
3832                  bath_prefix='ba',
3833                  elevation_prefix='el',
3834                  verbose=False):
3835    """
    Produce an sww boundary file from esri ascii data from CSIRO.
3837
3838    Also convert latitude and longitude to UTM. All coordinates are
3839    assumed to be given in the GDA94 datum.
3840
3841    assume:
3842    All files are in esri ascii format
3843
3844    4 types of information
3845    bathymetry
3846    elevation
3847    u velocity
3848    v velocity
3849
3850    Assumptions
3851    The metadata of all the files is the same
    Each type is in a separate directory
    One bath file with extension .000
3854    The time period is less than 24hrs and uniform.
3855    """
3856
3857    from Scientific.IO.NetCDF import NetCDFFile
3858
3859    from anuga.coordinate_transforms.redfearn import redfearn
3860
    precision = Float # So if we want to change the precision it's done here
3862
    # go into the bath dir and load the only file
3864    bath_files = os.listdir(bath_dir)
3865    bath_file = bath_files[0]
3866    bath_dir_file =  bath_dir + os.sep + bath_file
3867    bath_metadata, bath_grid =  _read_asc(bath_dir_file)
3868
3869    #Use the date.time of the bath file as a basis for
3870    #the start time for other files
3871    base_start = bath_file[-12:]
3872
3873    #go into the elevation dir and load the 000 file
3874    elevation_dir_file = elevation_dir  + os.sep + elevation_prefix \
3875                         + base_start
3876
3877    elevation_files = os.listdir(elevation_dir)
3878    ucur_files = os.listdir(ucur_dir)
3879    vcur_files = os.listdir(vcur_dir)
3880    elevation_files.sort()
3881
    # the first elevation file should be the
    # file with the same base name as the bath data
    assert elevation_files[0] == elevation_prefix + base_start
3885
3886    number_of_latitudes = bath_grid.shape[0]
3887    number_of_longitudes = bath_grid.shape[1]
3888    number_of_volumes = (number_of_latitudes-1) * (number_of_longitudes-1) * 2
3889
3890    longitudes = [bath_metadata['xllcorner'] + x*bath_metadata['cellsize'] \
3891                  for x in range(number_of_longitudes)]
3892    latitudes = [bath_metadata['yllcorner'] + y*bath_metadata['cellsize'] \
3893                 for y in range(number_of_latitudes)]
3894
    # reverse order of lat, so the first lat represents the first grid row
3896    latitudes.reverse()
3897
3898    kmin, kmax, lmin, lmax = _get_min_max_indexes(latitudes[:],longitudes[:],
3899                                                  minlat=minlat, maxlat=maxlat,
3900                                                  minlon=minlon, maxlon=maxlon)
3901
3902    bath_grid = bath_grid[kmin:kmax,lmin:lmax]
3903    latitudes = latitudes[kmin:kmax]
3904    longitudes = longitudes[lmin:lmax]
3905    number_of_latitudes = len(latitudes)
3906    number_of_longitudes = len(longitudes)
3907    number_of_times = len(os.listdir(elevation_dir))
3908    number_of_points = number_of_latitudes * number_of_longitudes
3909    number_of_volumes = (number_of_latitudes-1) * (number_of_longitudes-1) * 2
3910
3911    #Work out the times
3912    if len(elevation_files) > 1:
3913        # Assume: The time period is less than 24hrs.
3914        time_period = (int(elevation_files[1][-3:]) \
3915                       - int(elevation_files[0][-3:])) * 60*60
3916        times = [x*time_period for x in range(len(elevation_files))]
3917    else:
3918        times = [0.0]
3919
3920    if verbose:
3921        print '------------------------------------------------'
3922        print 'Statistics:'
3923        print '  Extent (lat/lon):'
3924        print '    lat in [%f, %f], len(lat) == %d' \
3925              % (min(latitudes), max(latitudes), len(latitudes))
3926        print '    lon in [%f, %f], len(lon) == %d' \
3927              % (min(longitudes), max(longitudes), len(longitudes))
3928        print '    t in [%f, %f], len(t) == %d' \
3929              % (min(times), max(times), len(times))
3930
3931    ######### WRITE THE SWW FILE #############
3932
3933    # NetCDF file definition
3934    outfile = NetCDFFile(sww_file, netcdf_mode_w)
3935
3936    #Create new file
3937    outfile.institution = 'Geoscience Australia'
3938    outfile.description = 'Converted from XXX'
3939
3940    #For sww compatibility
3941    outfile.smoothing = 'Yes'
3942    outfile.order = 1
3943
3944    #Start time in seconds since the epoch (midnight 1/1/1970)
3945    outfile.starttime = starttime = times[0]
3946
3947    # dimension definitions
3948    outfile.createDimension('number_of_volumes', number_of_volumes)
3949    outfile.createDimension('number_of_vertices', 3)
3950    outfile.createDimension('number_of_points', number_of_points)
3951    outfile.createDimension('number_of_timesteps', number_of_times)
3952
3953    # variable definitions
3954    outfile.createVariable('x', precision, ('number_of_points',))
3955    outfile.createVariable('y', precision, ('number_of_points',))
3956    outfile.createVariable('elevation', precision, ('number_of_points',))
3957
3958    #FIXME: Backwards compatibility
3959    outfile.createVariable('z', precision, ('number_of_points',))
3960    #################################
3961
3962    outfile.createVariable('volumes', Int, ('number_of_volumes',
3963                                            'number_of_vertices'))
3964
3965    outfile.createVariable('time', precision, ('number_of_timesteps',))
3966
3967    outfile.createVariable('stage', precision, ('number_of_timesteps',
3968                                                'number_of_points'))
3969
3970    outfile.createVariable('xmomentum', precision, ('number_of_timesteps',
3971                                                    'number_of_points'))
3972
3973    outfile.createVariable('ymomentum', precision, ('number_of_timesteps',
3974                                                    'number_of_points'))
3975
3976    #Store
3977    from anuga.coordinate_transforms.redfearn import redfearn
3978
3979    x = zeros(number_of_points, Float)  #Easting
3980    y = zeros(number_of_points, Float)  #Northing
3981
3982    if verbose: print 'Making triangular grid'
3983
3984    #Get zone of 1st point.
3985    refzone, _, _ = redfearn(latitudes[0], longitudes[0])
3986
3987    vertices = {}
3988    i = 0
3989    for k, lat in enumerate(latitudes):
3990        for l, lon in enumerate(longitudes):
3991            vertices[l,k] = i
3992
3993            zone, easting, northing = redfearn(lat, lon)
3994
3995            #msg = 'Zone boundary crossed at longitude =', lon
3996            #assert zone == refzone, msg
3997            #print '%7.2f %7.2f %8.2f %8.2f' %(lon, lat, easting, northing)
3998            x[i] = easting
3999            y[i] = northing
4000            i += 1
4001
4002    #Construct 2 triangles per 'rectangular' element
4003    volumes = []
4004    for l in range(number_of_longitudes-1):    #X direction
4005        for k in range(number_of_latitudes-1): #Y direction
4006            v1 = vertices[l,k+1]
4007            v2 = vertices[l,k]
4008            v3 = vertices[l+1,k+1]
4009            v4 = vertices[l+1,k]
4010
            #Note, this is different to the ferret2sww code
4012            #since the order of the lats is reversed.
4013            volumes.append([v1,v3,v2]) #Upper element
4014            volumes.append([v4,v2,v3]) #Lower element
4015
4016    volumes = array(volumes)
4017
4018    geo_ref = Geo_reference(refzone, min(x), min(y))
4019    geo_ref.write_NetCDF(outfile)
4020
4021    # This will put the geo ref in the middle
4022    #geo_ref = Geo_reference(refzone, (max(x)+min(x))/2., (max(x)+min(y))/2.)
4023
4024    if verbose:
4025        print '------------------------------------------------'
4026        print 'More Statistics:'
        print '  Extent (x/y):'
        print '    x in [%f, %f], len(x) == %d' \
              % (min(x), max(x), len(x))
        print '    y in [%f, %f], len(y) == %d' \
              % (min(y), max(y), len(y))
4032        print 'geo_ref: ', geo_ref
4033
4034    z = resize(bath_grid,outfile.variables['z'][:].shape)
4035    outfile.variables['x'][:] = x - geo_ref.get_xllcorner()
4036    outfile.variables['y'][:] = y - geo_ref.get_yllcorner()
4037# FIXME (Ole): Remove once viewer has been recompiled and changed
4038#              to use elevation instead of z
4039    outfile.variables['z'][:] = z
4040    outfile.variables['elevation'][:] = z
4041    outfile.variables['volumes'][:] = volumes.astype(Int32) # On Opteron 64
4042
4043    stage = outfile.variables['stage']
4044    xmomentum = outfile.variables['xmomentum']
4045    ymomentum = outfile.variables['ymomentum']
4046
4047    outfile.variables['time'][:] = times   #Store time relative
4048
4049    if verbose: print 'Converting quantities'
4050
4051    n = number_of_times
4052    for j in range(number_of_times):
4053        # load in files
4054        elevation_meta, elevation_grid = \
4055            _read_asc(elevation_dir + os.sep + elevation_files[j])
4056
4057        _, u_momentum_grid = _read_asc(ucur_dir + os.sep + ucur_files[j])
4058        _, v_momentum_grid = _read_asc(vcur_dir + os.sep + vcur_files[j])
4059
4060        #cut matrix to desired size
4061        elevation_grid = elevation_grid[kmin:kmax,lmin:lmax]
4062        u_momentum_grid = u_momentum_grid[kmin:kmax,lmin:lmax]
4063        v_momentum_grid = v_momentum_grid[kmin:kmax,lmin:lmax]
4064
4065        # handle missing values
4066        missing = (elevation_grid == elevation_meta['NODATA_value'])
4067        if sometrue (missing):
4068            if fail_on_NaN:
4069                msg = 'File %s contains missing values' \
4070                      % (elevation_files[j])
4071                raise DataMissingValuesError, msg
4072            else:
4073                elevation_grid = elevation_grid*(missing==0) \
4074                                 + missing*elevation_NaN_filler
4075
4076        if verbose and j % ((n+10)/10) == 0: print '  Doing %d of %d' % (j, n)
4077
4078        i = 0
4079        for k in range(number_of_latitudes):      #Y direction
4080            for l in range(number_of_longitudes): #X direction
4081                w = zscale*elevation_grid[k,l] + mean_stage
4082                stage[j,i] = w
4083                h = w - z[i]
4084                xmomentum[j,i] = u_momentum_grid[k,l]*h
4085                ymomentum[j,i] = v_momentum_grid[k,l]*h
4086                i += 1
4087
4088    outfile.close()
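
# Usage sketch for asc_csiro2sww; directory and file names are hypothetical:
#
#     asc_csiro2sww('bathy_dir', 'elev_dir', 'ucur_dir', 'vcur_dir',
#                   'boundary.sww',
#                   minlat=-34.5, maxlat=-33.0, minlon=150.0, maxlon=151.5,
#                   verbose=True)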
4089
4090
4091##
4092# @brief Return max&min indexes (for slicing) of area specified.
4093# @param latitudes_ref ??
4094# @param longitudes_ref ??
4095# @param minlat Minimum latitude of specified area.
4096# @param maxlat Maximum latitude of specified area.
4097# @param minlon Minimum longitude of specified area.
4098# @param maxlon Maximum longitude of specified area.
4099# @return Tuple (lat_min_index, lat_max_index, lon_min_index, lon_max_index)
4100def _get_min_max_indexes(latitudes_ref,longitudes_ref,
4101                         minlat=None, maxlat=None,
4102                         minlon=None, maxlon=None):
4103    """
4104    Return max, min indexes (for slicing) of the lat and long arrays to cover
4105    the area specified with min/max lat/long.
4106
4107    Think of the latitudes and longitudes describing a 2d surface.
    The area returned is, if possible, just big enough to cover the
    input max/min area. (This will not be possible if the max/min area
    has a section outside of the latitudes/longitudes area.)

    assert longitudes are sorted from low to high
    (west to east, e.g. 148 - 151)
    assert latitudes are sorted, ascending or descending
4115    """
4116
4117    latitudes = latitudes_ref[:]
4118    longitudes = longitudes_ref[:]
4119
4120    latitudes = ensure_numeric(latitudes)
4121    longitudes = ensure_numeric(longitudes)
4122
4123    assert allclose(sort(longitudes), longitudes)
4124
4125    #print latitudes[0],longitudes[0]
4126    #print len(latitudes),len(longitudes)
4127    #print latitudes[len(latitudes)-1],longitudes[len(longitudes)-1]
4128
4129    lat_ascending = True
4130    if not allclose(sort(latitudes), latitudes):
4131        lat_ascending = False
4132        # reverse order of lat, so it's in ascending order
4133        latitudes = latitudes[::-1]
4134        assert allclose(sort(latitudes), latitudes)
4135
4136    largest_lat_index = len(latitudes)-1
4137
    #Cut out a smaller extent.
    if minlat is None:
        lat_min_index = 0
    else:
        lat_min_index = searchsorted(latitudes, minlat) - 1
        if lat_min_index < 0:
            lat_min_index = 0

    if maxlat is None:
        lat_max_index = largest_lat_index    # len(latitudes)
    else:
        lat_max_index = searchsorted(latitudes, maxlat)
        if lat_max_index > largest_lat_index:
            lat_max_index = largest_lat_index

    if minlon is None:
        lon_min_index = 0
    else:
        lon_min_index = searchsorted(longitudes, minlon) - 1
        if lon_min_index < 0:
            lon_min_index = 0

    if maxlon is None:
        lon_max_index = len(longitudes)
    else:
        lon_max_index = searchsorted(longitudes, maxlon)
4164
    # Reverse the indexes if the lat array is descending
4166    if lat_ascending is False:
4167        lat_min_index, lat_max_index = largest_lat_index - lat_max_index, \
4168                                       largest_lat_index - lat_min_index
4169    lat_max_index = lat_max_index + 1 # taking into account how slicing works
4170    lon_max_index = lon_max_index + 1 # taking into account how slicing works
4171
4172    return lat_min_index, lat_max_index, lon_min_index, lon_max_index
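
# Worked example for _get_min_max_indexes (illustrative values):
#
#     lats = [-34.5, -34.0, -33.5, -33.0]    # ascending
#     lons = [150.0, 150.5, 151.0]
#     kmin, kmax, lmin, lmax = _get_min_max_indexes(lats, lons,
#                                                   minlat=-33.8, maxlat=-33.2,
#                                                   minlon=150.6, maxlon=150.9)
#     # -> (1, 4, 1, 3), so lats[kmin:kmax] == [-34.0, -33.5, -33.0] and
#     #    lons[lmin:lmax] == [150.5, 151.0] just cover the requested area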
4173
4174
4175##
4176# @brief Read an ASC file.
# @param filename Path to the file to read.
4178# @param verbose True if this function is to be verbose.
4179def _read_asc(filename, verbose=False):
4180    """Read esri file from the following ASCII format (.asc)
4181
4182    Example:
4183
4184    ncols         3121
4185    nrows         1800
4186    xllcorner     722000
4187    yllcorner     5893000
4188    cellsize      25
4189    NODATA_value  -9999
4190    138.3698 137.4194 136.5062 135.5558 ..........
4191    """
4192
4193    datafile = open(filename)
4194
4195    if verbose: print 'Reading DEM from %s' % filename
4196
4197    lines = datafile.readlines()
4198    datafile.close()
4199
4200    if verbose: print 'Got', len(lines), ' lines'
4201
4202    ncols = int(lines.pop(0).split()[1].strip())
4203    nrows = int(lines.pop(0).split()[1].strip())
4204    xllcorner = float(lines.pop(0).split()[1].strip())
4205    yllcorner = float(lines.pop(0).split()[1].strip())
4206    cellsize = float(lines.pop(0).split()[1].strip())
4207    NODATA_value = float(lines.pop(0).split()[1].strip())
4208
4209    assert len(lines) == nrows
4210
4211    #Store data
4212    grid = []
4213
4214    n = len(lines)
4215    for i, line in enumerate(lines):
4216        cells = line.split()
4217        assert len(cells) == ncols
4218        grid.append(array([float(x) for x in cells]))
4219    grid = array(grid)
4220
4221    return {'xllcorner':xllcorner,
4222            'yllcorner':yllcorner,
4223            'cellsize':cellsize,
4224            'NODATA_value':NODATA_value}, grid
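
# Usage sketch for _read_asc; the file name is hypothetical:
#
#     metadata, grid = _read_asc('bathymetry.asc')
#     # metadata holds xllcorner, yllcorner, cellsize and NODATA_value;
#     # grid is an (nrows, ncols) array of cell values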
4225
4226
4227    ####  URS 2 SWW  ###
4228
4229# Definitions of various NetCDF dimension names, etc.
4230lon_name = 'LON'
4231lat_name = 'LAT'
4232time_name = 'TIME'
precision = Float # So if we want to change the precision it's done here
4234
4235##
# @brief Class for a NetCDF data file writer.
4237class Write_nc:
4238    """Write an nc file.
4239
4240    Note, this should be checked to meet cdc netcdf conventions for gridded
4241    data. http://www.cdc.noaa.gov/cdc/conventions/cdc_netcdf_standard.shtml
4242    """
4243
4244    ##
4245    # @brief Instantiate a Write_nc instance.
4246    # @param quantity_name
4247    # @param file_name
4248    # @param time_step_count The number of time steps.
4249    # @param time_step The time_step size.
4250    # @param lon
4251    # @param lat
4252    def __init__(self,
4253                 quantity_name,
4254                 file_name,
4255                 time_step_count,
4256                 time_step,
4257                 lon,
4258                 lat):
4259        """Instantiate a Write_nc instance (NetCDF file writer).
4260
4261        time_step_count is the number of time steps.
4262        time_step is the time step size
4263
        pre-condition: quantity_name must be 'HA', 'UA' or 'VA'.
4265        """
4266
4267        self.quantity_name = quantity_name
4268        quantity_units = {'HA':'CENTIMETERS',
4269                          'UA':'CENTIMETERS/SECOND',
4270                          'VA':'CENTIMETERS/SECOND'}
4271
4272        multiplier_dic = {'HA':100.0,   # To convert from m to cm
4273                          'UA':100.0,   #             and m/s to cm/sec
4274                          'VA':-100.0}  # MUX files have positive x in the
4275                                        # Southern direction.  This corrects
4276                                        # for it, when writing nc files.
4277
4278        self.quantity_multiplier =  multiplier_dic[self.quantity_name]
4279
4280        #self.file_name = file_name
4281        self.time_step_count = time_step_count
4282        self.time_step = time_step
4283
4284        # NetCDF file definition
4285        self.outfile = NetCDFFile(file_name, netcdf_mode_w)
4286        outfile = self.outfile
4287
4288        #Create new file
4289        nc_lon_lat_header(outfile, lon, lat)
4290
4291        # TIME
4292        outfile.createDimension(time_name, None)
4293        outfile.createVariable(time_name, precision, (time_name,))
4294
4295        #QUANTITY
4296        outfile.createVariable(self.quantity_name, precision,
4297                               (time_name, lat_name, lon_name))
4298        outfile.variables[self.quantity_name].missing_value = -1.e+034
4299        outfile.variables[self.quantity_name].units = \
4300                                 quantity_units[self.quantity_name]
4301        outfile.variables[lon_name][:]= ensure_numeric(lon)
4302        outfile.variables[lat_name][:]= ensure_numeric(lat)
4303
4304        #Assume no one will be wanting to read this, while we are writing
4305        #outfile.close()
4306
4307    ##
4308    # @brief Write a time-step of quantity data.
4309    # @param quantity_slice The data to be stored for this time-step.
4310    def store_timestep(self, quantity_slice):
4311        """Write a time slice of quantity info
4312
4313        quantity_slice is the data to be stored at this time step
4314        """
4315
4316        # Get the variables
4317        time = self.outfile.variables[time_name]
4318        quantity = self.outfile.variables[self.quantity_name]
4319
        # Get index of the slice to write
4321        i = len(time)
4322
4323        #Store time
4324        time[i] = i * self.time_step    #self.domain.time
4325        quantity[i,:] = quantity_slice * self.quantity_multiplier
4326
4327    ##
4328    # @brief Close file underlying the class instance.
4329    def close(self):
4330        self.outfile.close()
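
# Usage sketch for Write_nc; the file name and values are illustrative:
#
#     from Numeric import zeros, Float
#     lon = [150.0, 150.5, 151.0]
#     lat = [-34.0, -33.5]
#     nc = Write_nc('HA', 'test_ha.nc', time_step_count=2, time_step=60.0,
#                   lon=lon, lat=lat)
#     for i in range(2):
#         nc.store_timestep(zeros((len(lat), len(lon)), Float))  # heights [m]
#     nc.close()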
4331
4332
4333##
4334# @brief Convert URS file to SWW file.
4335# @param basename_in Stem of the input filename.
4336# @param basename_out Stem of the output filename.
4337# @param verbose True if this function is to be verbose.
4338# @param remove_nc_files
4339# @param minlat Sets extent of area to be used.  If not supplied, full extent.
4340# @param maxlat Sets extent of area to be used.  If not supplied, full extent.
4341# @param minlon Sets extent of area to be used.  If not supplied, full extent.
4342# @param maxlon Sets extent of area to be used.  If not supplied, full extent.
4343# @param mint
4344# @param maxt
4345# @param mean_stage
4346# @param origin A 3-tuple with geo referenced UTM coordinates
4347# @param zscale
4348# @param fail_on_NaN
4349# @param NaN_filler
4350# @param elevation
4351# @note Also convert latitude and longitude to UTM. All coordinates are
4352#       assumed to be given in the GDA94 datum.
4353def urs2sww(basename_in='o', basename_out=None, verbose=False,
4354            remove_nc_files=True,
4355            minlat=None, maxlat=None,
4356            minlon=None, maxlon=None,
4357            mint=None, maxt=None,
4358            mean_stage=0,
4359            origin=None,
4360            zscale=1,
4361            fail_on_NaN=True,
4362            NaN_filler=0,
4363            elevation=None):
4364    """Convert a URS file to an SWW file.
4365    Convert URS C binary format for wave propagation to
4366    sww format native to abstract_2d_finite_volumes.
4367
4368    Specify only basename_in and read files of the form
4369    basefilename-z-mux2, basefilename-e-mux2 and
4370    basefilename-n-mux2 containing relative height,
4371    x-velocity and y-velocity, respectively.
4372
4373    Also convert latitude and longitude to UTM. All coordinates are
4374    assumed to be given in the GDA94 datum. The latitude and longitude
4375    information is for  a grid.
4376
    min's and max's: If omitted - full extent is used.
4378    To include a value min may equal it, while max must exceed it.
4379    Lat and lon are assumed to be in decimal degrees.
4380    NOTE: minlon is the most east boundary.
4381
4382    origin is a 3-tuple with geo referenced
4383    UTM coordinates (zone, easting, northing)
4384    It will be the origin of the sww file. This shouldn't be used,
    since all of anuga should be able to handle an arbitrary origin.
4386
    URS C binary format has data organised as TIME, LONGITUDE, LATITUDE
    which means that latitude is the fastest
    varying dimension (row major order, so to speak)

    In URS C binary the latitudes and longitudes are in ascending order.
4392    """
4393
    if basename_out is None:
4395        basename_out = basename_in
4396
4397    files_out = urs2nc(basename_in, basename_out)
4398
4399    ferret2sww(basename_out,
4400               minlat=minlat,
4401               maxlat=maxlat,
4402               minlon=minlon,
4403               maxlon=maxlon,
4404               mint=mint,
4405               maxt=maxt,
4406               mean_stage=mean_stage,
4407               origin=origin,
4408               zscale=zscale,
4409               fail_on_NaN=fail_on_NaN,
4410               NaN_filler=NaN_filler,
4411               inverted_bathymetry=True,
4412               verbose=verbose)
4413   
4414    if remove_nc_files:
4415        for file_out in files_out:
4416            os.remove(file_out)
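
# Usage sketch for urs2sww; the basename 'o' matches the default.  This reads
# the three mux files named after basename_in, converts them to nc files and
# then to an sww file via ferret2sww:
#
#     urs2sww(basename_in='o', mean_stage=0.0, verbose=True)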
4417
4418
4419##
4420# @brief Convert 3 URS files back to 4 NC files.
4421# @param basename_in Stem of the input filenames.
# @param basename_out Stem of the output filenames.
4423# @note The name of the urs file names must be:
4424#          [basename_in]-z-mux
4425#          [basename_in]-e-mux
4426#          [basename_in]-n-mux
4427def urs2nc(basename_in='o', basename_out='urs'):
4428    """Convert the 3 urs files to 4 nc files.
4429
4430    The name of the urs file names must be;
4431    [basename_in]-z-mux
4432    [basename_in]-e-mux
4433    [basename_in]-n-mux
4434    """
4435
4436    files_in = [basename_in + WAVEHEIGHT_MUX_LABEL,
4437                basename_in + EAST_VELOCITY_LABEL,
4438                basename_in + NORTH_VELOCITY_LABEL]
4439    files_out = [basename_out + '_ha.nc',
4440                 basename_out + '_ua.nc',
4441                 basename_out + '_va.nc']
4442    quantities = ['HA', 'UA', 'VA']
4443
4445    for i, file_name in enumerate(files_in):
4446        if os.access(file_name, os.F_OK) == 0:
            if os.access(file_name + '.mux', os.F_OK) == 0:
                msg = 'File %s does not exist or is not accessible' % file_name
                raise IOError, msg
            else:
                files_in[i] += '.mux'
                print "file_name", file_name
4453
4454    hashed_elevation = None
4455    for file_in, file_out, quantity in map(None, files_in,
4456                                           files_out,
4457                                           quantities):
4458        lonlatdep, lon, lat, depth = _binary_c2nc(file_in,
4459                                                  file_out,
4460                                                  quantity)
        if hashed_elevation is None:
4462            elevation_file = basename_out + '_e.nc'
4463            write_elevation_nc(elevation_file,
4464                               lon,
4465                               lat,
4466                               depth)
4467            hashed_elevation = myhash(lonlatdep)
4468        else:
4469            msg = "The elevation information in the mux files is inconsistent"
4470            assert hashed_elevation == myhash(lonlatdep), msg
4471
4472    files_out.append(elevation_file)
4473
4474    return files_out
4475
4476
4477##
4478# @brief Convert a quantity URS file to a NetCDF file.
4479# @param file_in Path to input URS file.
4480# @param file_out Path to the output file.
4481# @param quantity Name of the quantity to be written to the output file.
4482# @return A tuple (lonlatdep, lon, lat, depth).
4483def _binary_c2nc(file_in, file_out, quantity):
4484    """Reads in a quantity urs file and writes a quantity nc file.
4485    Additionally, returns the depth and lat, long info,
4486    so it can be written to a file.
4487    """
4488
4489    columns = 3                           # long, lat , depth
4490    mux_file = open(file_in, 'rb')
4491
4492    # Number of points/stations
4493    (points_num,) = unpack('i', mux_file.read(4))
4494
4495    # nt, int - Number of time steps
4496    (time_step_count,) = unpack('i', mux_file.read(4))
4497
4498    #dt, float - time step, seconds
4499    (time_step,) = unpack('f', mux_file.read(4))
4500
4501    msg = "Bad data in the mux file."
4502    if points_num < 0:
4503        mux_file.close()
4504        raise ANUGAError, msg
4505    if time_step_count < 0:
4506        mux_file.close()
4507        raise ANUGAError, msg
4508    if time_step < 0:
4509        mux_file.close()
4510        raise ANUGAError, msg
4511
4512    lonlatdep = p_array.array('f')
4513    lonlatdep.read(mux_file, columns * points_num)
4514    lonlatdep = array(lonlatdep, typecode=Float)
4515    lonlatdep = reshape(lonlatdep, (points_num, columns))
4516
4517    lon, lat, depth = lon_lat2grid(lonlatdep)
4518    lon_sorted = list(lon)
4519    lon_sorted.sort()
4520
4521    if not lon == lon_sorted:
4522        msg = "Longitudes in mux file are not in ascending order"
4523        raise IOError, msg
4524
4525    lat_sorted = list(lat)
4526    lat_sorted.sort()
4527
4528# UNUSED?
4529##    if not lat == lat_sorted:
4530##        msg = "Latitudes in mux file are not in ascending order"
4531
4532    nc_file = Write_nc(quantity,
4533                       file_out,
4534                       time_step_count,
4535                       time_step,
4536                       lon,
4537                       lat)
4538
4539    for i in range(time_step_count):
4540        #Read in a time slice from mux file
4541        hz_p_array = p_array.array('f')
4542        hz_p_array.read(mux_file, points_num)
4543        hz_p = array(hz_p_array, typecode=Float)
4544        hz_p = reshape(hz_p, (len(lon), len(lat)))
4545        hz_p = transpose(hz_p)  # mux has lat varying fastest, nc has long v.f.
4546
4547        #write time slice to nc file
4548        nc_file.store_timestep(hz_p)
4549
4550    mux_file.close()
4551    nc_file.close()
4552
4553    return lonlatdep, lon, lat, depth
4554
4555
4556##
4557# @brief Write an NC elevation file.
4558# @param file_out Path to the output file.
4559# @param lon ??
4560# @param lat ??
4561# @param depth_vector The elevation data to write.
4562def write_elevation_nc(file_out, lon, lat, depth_vector):
4563    """Write an nc elevation file."""
4564
4565    # NetCDF file definition
4566    outfile = NetCDFFile(file_out, netcdf_mode_w)
4567
4568    #Create new file
4569    nc_lon_lat_header(outfile, lon, lat)
4570
4571    # ELEVATION
4572    zname = 'ELEVATION'
4573    outfile.createVariable(zname, precision, (lat_name, lon_name))
4574    outfile.variables[zname].units = 'CENTIMETERS'
4575    outfile.variables[zname].missing_value = -1.e+034
4576
4577    outfile.variables[lon_name][:] = ensure_numeric(lon)
4578    outfile.variables[lat_name][:] = ensure_numeric(lat)
4579
4580    depth = reshape(depth_vector, (len(lat), len(lon)))
4581    outfile.variables[zname][:] = depth
4582
4583    outfile.close()
4584
4585
4586##
4587# @brief Write lat/lon headers to a NetCDF file.
4588# @param outfile Handle to open file to write to.
4589# @param lon An iterable of the longitudes.
4590# @param lat An iterable of the latitudes.
4591# @note Defines lat/long dimensions and variables. Sets various attributes:
4592#          .point_spacing  and  .units
4593#       and writes lat/lon data.
4594
4595def nc_lon_lat_header(outfile, lon, lat):
4596    """Write lat/lon headers to a NetCDF file.
4597
4598    outfile is the netcdf file handle.
4599    lon - a list/array of the longitudes
4600    lat - a list/array of the latitudes
4601    """
4602
4603    outfile.institution = 'Geoscience Australia'
4604    outfile.description = 'Converted from URS binary C'
4605
4606    # Longitude
4607    outfile.createDimension(lon_name, len(lon))
4608    outfile.createVariable(lon_name, precision, (lon_name,))
4609    outfile.variables[lon_name].point_spacing = 'uneven'
4610    outfile.variables[lon_name].units = 'degrees_east'
4611    outfile.variables[lon_name].assignValue(lon)
4612
4613    # Latitude
4614    outfile.createDimension(lat_name, len(lat))
4615    outfile.createVariable(lat_name, precision, (lat_name,))
4616    outfile.variables[lat_name].point_spacing = 'uneven'
4617    outfile.variables[lat_name].units = 'degrees_north'
4618    outfile.variables[lat_name].assignValue(lat)
4619
4620
4621##
4622# @brief
4623# @param long_lat_dep
4624# @return A tuple (long, lat, quantity).
4625# @note The latitude is the fastest varying dimension - in mux files.
4626def lon_lat2grid(long_lat_dep):
4627    """
    Given a list of points that are assumed to be on a grid,
    return the longs and lats of the grid.
4630    long_lat_dep is an array where each row is a position.
4631    The first column is longitudes.
4632    The second column is latitudes.
4633
4634    The latitude is the fastest varying dimension - in mux files
4635    """
4636
4637    LONG = 0
4638    LAT = 1
4639    QUANTITY = 2
4640
4641    long_lat_dep = ensure_numeric(long_lat_dep, Float)
4642
4643    num_points = long_lat_dep.shape[0]
4644    this_rows_long = long_lat_dep[0,LONG]
4645
    # Count the number of unique latitudes
    # (check the bound before indexing to avoid running past the end)
    i = 0
    while i < num_points and long_lat_dep[i,LONG] == this_rows_long:
        i += 1
4650
    # Determine the lats and longs from the grid
4652    lat = long_lat_dep[:i, LAT]
4653    long = long_lat_dep[::i, LONG]
4654
4655    lenlong = len(long)
4656    lenlat = len(lat)
4657
4658    msg = 'Input data is not gridded'
4659    assert num_points % lenlat == 0, msg
4660    assert num_points % lenlong == 0, msg
4661
4662    # Test that data is gridded
4663    for i in range(lenlong):
4664        msg = 'Data is not gridded.  It must be for this operation'
4665        first = i * lenlat
4666        last = first + lenlat
4667
4668        assert allclose(long_lat_dep[first:last,LAT], lat), msg
4669        assert allclose(long_lat_dep[first:last,LONG], long[i]), msg
4670
4671    msg = 'Out of range latitudes/longitudes'
    for l in lat:
        assert -90 < l < 90, msg
    for l in long:
        assert -180 < l < 180, msg
4674
4675    # Changing quantity from lat being the fastest varying dimension to
4676    # long being the fastest varying dimension
4677    # FIXME - make this faster/do this a better way
4678    # use numeric transpose, after reshaping the quantity vector
4679    quantity = zeros(num_points, Float)
4680
4681    for lat_i, _ in enumerate(lat):
4682        for long_i, _ in enumerate(long):
4683            q_index = lat_i*lenlong + long_i
4684            lld_index = long_i*lenlat + lat_i
4685            temp = long_lat_dep[lld_index, QUANTITY]
4686            quantity[q_index] = temp
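
    # A vectorised alternative to the loop above (a sketch, as per the FIXME;
    # reshape so that rows are longitudes, then transpose and flatten so that
    # longitude becomes the fastest varying dimension):
    #
    #     from Numeric import ravel
    #     quantity = ravel(transpose(reshape(long_lat_dep[:, QUANTITY],
    #                                        (lenlong, lenlat))))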
4687
4688    return long, lat, quantity
4689
4690################################################################################
4691# END URS 2 SWW
4692################################################################################
4693
4694################################################################################
4695# URS UNGRIDDED 2 SWW
4696################################################################################
4697
4698### PRODUCING THE POINTS NEEDED FILE ###
4699
4700# Ones used for FESA 2007 results
4701#LL_LAT = -50.0
4702#LL_LONG = 80.0
4703#GRID_SPACING = 1.0/60.0
4704#LAT_AMOUNT = 4800
4705#LONG_AMOUNT = 3600
4706
4707
##
# @brief Write a .urs file of the boundary points needed to replicate a URS grid.
# @param file_name Path of the .urs file to write (extension added if missing).
# @param boundary_polygon A list of UTM points describing a polygon.
# @param zone The UTM zone of the polygon points.
# @param ll_lat Lower left latitude of the URS grid, in decimal degrees.
# @param ll_long Lower left longitude of the URS grid, in decimal degrees.
# @param grid_spacing Grid spacing in decimal degrees.
# @param lat_amount Number of latitudes in the URS grid.
# @param long_amount Number of longitudes in the URS grid.
# @param isSouthernHemisphere True if the data is in the southern hemisphere.
# @param export_csv True if a .csv copy of the points is also wanted.
# @param use_cache True if caching of the result is to be used.
# @param verbose True if this function is to be verbose.
# @return A Geospatial_data object holding the boundary points.
4723def URS_points_needed_to_file(file_name, boundary_polygon, zone,
4724                              ll_lat, ll_long,
4725                              grid_spacing,
4726                              lat_amount, long_amount,
4727                              isSouthernHemisphere=True,
4728                              export_csv=False, use_cache=False,
4729                              verbose=False):
4730    """
4731    Given the info to replicate the URS grid and a polygon output
4732    a file that specifies the cloud of boundary points for URS.
4733
4734    This creates a .urs file.  This is in the format used by URS;
4735    1st line is the number of points,
4736    each line after represents a point,in lats and longs.
4737
4738    Note: The polygon cannot cross zones or hemispheres.
4739
4740    A work-a-round for different zones or hemispheres is to run this twice,
4741    once for each zone, and then combine the output.
4742
4743    file_name - name of the urs file produced for David.
4744    boundary_polygon - a list of points that describes a polygon.
4745                      The last point is assumed ot join the first point.
4746                      This is in UTM (lat long would be better though)
4747
4748     This is info about the URS model that needs to be inputted.
4749
4750    ll_lat - lower left latitude, in decimal degrees
4751    ll-long - lower left longitude, in decimal degrees
4752    grid_spacing - in deciamal degrees
4753    lat_amount - number of latitudes
4754    long_amount- number of longs
4755
4756    Don't add the file extension.  It will be added.
4757    """
4758
4759    geo = URS_points_needed(boundary_polygon, zone, ll_lat, ll_long,
4760                            grid_spacing,
4761                            lat_amount, long_amount, isSouthernHemisphere,
4762                            use_cache, verbose)
4763
4764    if not file_name[-4:] == ".urs":
4765        file_name += ".urs"
4766
4767    geo.export_points_file(file_name, isSouthHemisphere=isSouthernHemisphere)
4768
4769    if export_csv:
4770        if file_name[-4:] == ".urs":
4771            file_name = file_name[:-4] + ".csv"
4772        geo.export_points_file(file_name)
4773
4774    return geo
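
# Example (the polygon is hypothetical; the grid values are those used for
# the FESA 2007 results, see the commented constants above):
#
#     polygon = [[464000., 7895000.], [464000., 7901000.], [470000., 7901000.]]
#     URS_points_needed_to_file('boundary', polygon, 50,
#                               ll_lat=-50.0, ll_long=80.0,
#                               grid_spacing=1.0/60.0,
#                               lat_amount=4800, long_amount=3600,
#                               export_csv=True)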
4775
4776
##
# @brief Get the boundary points needed to replicate a URS grid, with caching.
# @param boundary_polygon A list of UTM points describing a polygon.
# @param zone The UTM zone of the polygon points.
# @param ll_lat Lower left latitude, in decimal degrees.
# @param ll_long Lower left longitude, in decimal degrees.
# @param grid_spacing Grid spacing in decimal degrees.
# @param lat_amount Number of latitudes.
# @param long_amount Number of longitudes.
# @param isSouthHemisphere True if the data is in the southern hemisphere.
# @param use_cache True if caching of the result is to be used.
# @param verbose True if this function is to be verbose.
4789def URS_points_needed(boundary_polygon, zone, ll_lat,
4790                      ll_long, grid_spacing,
4791                      lat_amount, long_amount, isSouthHemisphere=True,
4792                      use_cache=False, verbose=False):
4793    args = (boundary_polygon,
4794            zone, ll_lat,
4795            ll_long, grid_spacing,
4796            lat_amount, long_amount, isSouthHemisphere)
4797    kwargs = {}
4798
4799    if use_cache is True:
4800        try:
4801            from anuga.caching import cache
        except ImportError:
            msg = 'Caching was requested, but caching module ' \
                  'could not be imported'
            raise Exception, msg
4806
4807        geo = cache(_URS_points_needed,
4808                    args, kwargs,
4809                    verbose=verbose,
4810                    compression=False)
4811    else:
4812        geo = apply(_URS_points_needed, args, kwargs)
4813
4814    return geo
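
# Example (same hypothetical polygon as above, but returning the
# Geospatial_data object directly and caching the computation):
#
#     geo = URS_points_needed(polygon, 50, -50.0, 80.0, 1.0/60.0,
#                             4800, 3600, use_cache=True)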
4815
4816
##
# @brief Compute the boundary points needed to replicate a URS grid.
# @param boundary_polygon An iterable of points that describe a polygon.
# @param zone The UTM zone of the polygon points.
# @param ll_lat Lower left latitude, in decimal degrees.
# @param ll_long Lower left longitude, in decimal degrees.
# @param grid_spacing Grid spacing in decimal degrees.
# @param lat_amount Number of latitudes.
# @param long_amount Number of longitudes.
# @param isSouthHemisphere True if the data is in the southern hemisphere.
4827def _URS_points_needed(boundary_polygon,
4828                       zone, ll_lat,
4829                       ll_long, grid_spacing,
4830                       lat_amount, long_amount,
4831                       isSouthHemisphere):
4832    """
4833    boundary_polygon - a list of points that describes a polygon.
4834                      The last point is assumed ot join the first point.
4835                      This is in UTM (lat long would b better though)
4836
4837    ll_lat - lower left latitude, in decimal degrees
4838    ll-long - lower left longitude, in decimal degrees
4839    grid_spacing - in decimal degrees
4840
4841    """
4842
4843    from sets import ImmutableSet
4844
4845    msg = "grid_spacing can not be zero"
4846    assert not grid_spacing == 0, msg
4847
4848    a = boundary_polygon
4849
4850    # List of segments.  Each segment is two points.
4851    segs = [i and [a[i-1], a[i]] or [a[len(a)-1], a[0]] for i in range(len(a))]
4852
4853    # convert the segs to Lat's and longs.
4854    # Don't assume the zone of the segments is the same as the lower left
4855    # corner of the lat long data!!  They can easily be in different zones
4856    lat_long_set = ImmutableSet()
4857    for seg in segs:
4858        points_lat_long = points_needed(seg, ll_lat, ll_long, grid_spacing,
4859                                        lat_amount, long_amount, zone,
4860                                        isSouthHemisphere)
4861        lat_long_set |= ImmutableSet(points_lat_long)
4862
4863    if lat_long_set == ImmutableSet([]):
4864        msg = "URS region specified and polygon does not overlap."
4865        raise ValueError, msg
4866
4867    # Warning there is no info in geospatial saying the hemisphere of
4868    # these points.  There should be.
4869    geo = Geospatial_data(data_points=list(lat_long_set),
4870                          points_are_lats_longs=True)
4871
4872    return geo
4873
4874
4875##
# @brief Get the points that are needed to interpolate any point on a segment.
4877# @param seg Two points in the UTM.
4878# @param ll_lat Lower left latitude, in decimal degrees
4879# @param ll_long Lower left longitude, in decimal degrees
4880# @param grid_spacing
4881# @param lat_amount
4882# @param long_amount
4883# @param zone
4884# @param isSouthHemisphere
4885# @return A list of points.
4886def points_needed(seg, ll_lat, ll_long, grid_spacing,
4887                  lat_amount, long_amount, zone,
4888                  isSouthHemisphere):
4889    """
4890    seg is two points, in UTM
4891    return a list of the points, in lats and longs that are needed to
4892    interpolate any point on the segment.
4893    """
4894
4895    from math import sqrt
4896
4897    geo_reference = Geo_reference(zone=zone)
4898    geo = Geospatial_data(seg, geo_reference=geo_reference)
4899    seg_lat_long = geo.get_data_points(as_lat_long=True,
4900                                       isSouthHemisphere=isSouthHemisphere)
4901
4902    # 1.415 = 2^0.5, rounded up....
4903    sqrt_2_rounded_up = 1.415
4904    buffer = sqrt_2_rounded_up * grid_spacing
4905
4906    max_lat = max(seg_lat_long[0][0], seg_lat_long[1][0]) + buffer
4907    max_long = max(seg_lat_long[0][1], seg_lat_long[1][1]) + buffer
4908    min_lat = min(seg_lat_long[0][0], seg_lat_long[1][0]) - buffer
4909    min_long = min(seg_lat_long[0][1], seg_lat_long[1][1]) - buffer
4910
4911    first_row = (min_long - ll_long) / grid_spacing
4912
4913    # To round up
4914    first_row_long = int(round(first_row + 0.5))
4915
4916    last_row = (max_long - ll_long) / grid_spacing # round down
4917    last_row_long = int(round(last_row))
4918
4919    first_row = (min_lat - ll_lat) / grid_spacing
4920    # To round up
4921    first_row_lat = int(round(first_row + 0.5))
4922
4923    last_row = (max_lat - ll_lat) / grid_spacing # round down
4924    last_row_lat = int(round(last_row))
4925
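    # Maximum allowed distance from the segment before a point is discarded.
    # The constant appears to be one degree of latitude in metres (~111.1 km)
    # times sqrt(2), i.e. roughly the diagonal of one grid cell (our reading;
    # not documented in the original).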
4926    max_distance = 157147.4112 * grid_spacing
4927    points_lat_long = []
4928
4929    # Create a list of the lat long points to include.
4930    for index_lat in range(first_row_lat, last_row_lat + 1):
4931        for index_long in range(first_row_long, last_row_long + 1):
4932            lat = ll_lat + index_lat*grid_spacing
4933            long = ll_long + index_long*grid_spacing
4934
4935            #filter here to keep good points
4936            if keep_point(lat, long, seg, max_distance):
4937                points_lat_long.append((lat, long)) #must be hashable
4938
    # The points have already been filtered against max_distance by keep_point.
4940    return points_lat_long
4941
4942
##
# @brief Decide whether a lat/long point is close enough to a segment.
# @param lat Latitude of the point, in decimal degrees.
# @param long Longitude of the point, in decimal degrees.
# @param seg Two points in UTM.
# @param max_distance Maximum allowed distance from the segment, in metres.
4949def keep_point(lat, long, seg, max_distance):
4950    """
4951    seg is two points, UTM
4952    """
4953
4954    from math import sqrt
4955
4956    _ , x0, y0 = redfearn(lat, long)
4957    x1 = seg[0][0]
4958    y1 = seg[0][1]
4959    x2 = seg[1][0]
4960    y2 = seg[1][1]
4961    x2_1 = x2-x1
4962    y2_1 = y2-y1
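    # Perpendicular distance from (x0, y0) to the infinite line through
    # the segment, via the cross-product formula:
    #     d = |(x2-x1)*(y1-y0) - (x1-x0)*(y2-y1)| / |segment length|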
4963    try:
        d = abs(x2_1*(y1-y0) - (x1-x0)*y2_1) / \
            sqrt(x2_1*x2_1 + y2_1*y2_1)
4966    except ZeroDivisionError:
4967        if sqrt((x2_1)*(x2_1)+(y2_1)*(y2_1)) == 0 \
4968           and abs((x2_1)*(y1-y0)-(x1-x0)*(y2_1)) == 0:
4969            return True
4970        else:
4971            return False
4972
4973    return d <= max_distance
4974
4975################################################################################
4976# CONVERTING UNGRIDDED URS DATA TO AN SWW FILE
4977################################################################################
4978
4979WAVEHEIGHT_MUX_LABEL = '-z-mux'
EAST_VELOCITY_LABEL = '-e-mux'
NORTH_VELOCITY_LABEL = '-n-mux'
4982
4983##
4984# @brief Convert URS file(s) (wave prop) to an SWW file.
4985# @param basename_in Stem of the input filenames.
4986# @param basename_out Path to the output SWW file.
4987# @param verbose True if this function is to be verbose.
# @param mint Minimum time of the time series to use, in seconds.
# @param maxt Maximum time of the time series to use, in seconds.
# @param mean_stage Mean stage added to the converted stage values.
# @param origin Tuple with geo-ref UTM coordinates (zone, easting, northing).
# @param hole_points_UTM Optional UTM point marking a hole in the mesh.
# @param zscale Scale factor applied to the wave height values.
4994# @note Also convert latitude and longitude to UTM. All coordinates are
4995#       assumed to be given in the GDA94 datum.
4996# @note Input filename stem has suffixes '-z-mux', '-e-mux' and '-n-mux'
4997#       added for relative height, x-velocity and y-velocity respectively.
4998def urs_ungridded2sww(basename_in='o', basename_out=None, verbose=False,
4999                      mint=None, maxt=None,
5000                      mean_stage=0,
5001                      origin=None,
5002                      hole_points_UTM=None,
5003                      zscale=1):
5004    """
5005    Convert URS C binary format for wave propagation to
5006    sww format native to abstract_2d_finite_volumes.
5007
5008    Specify only basename_in and read files of the form
5009    basefilename-z-mux, basefilename-e-mux and
5010    basefilename-n-mux containing relative height,
5011    x-velocity and y-velocity, respectively.
5012
5013    Also convert latitude and longitude to UTM. All coordinates are
5014    assumed to be given in the GDA94 datum. The latitude and longitude
    information is assumed to be ungridded.
5016
    min's and max's: If omitted, the full extent is used.
    To include a value, mint and maxt may equal it.
    Lat and lon are assumed to be in decimal degrees.
5020
5021    origin is a 3-tuple with geo referenced
5022    UTM coordinates (zone, easting, northing)
5023    It will be the origin of the sww file. This shouldn't be used,
    since all of anuga should be able to handle an arbitrary origin.
5025    The mux point info is NOT relative to this origin.
5026
5027    URS C binary format has data organised as TIME, LONGITUDE, LATITUDE
5028    which means that latitude is the fastest
5029    varying dimension (row major order, so to speak)
5030
    In URS C binary the latitudes and longitudes are in ascending order.
5032
5033    Note, interpolations of the resulting sww file will be different
5034    from results of urs2sww.  This is due to the interpolation
5035    function used, and the different grid structure between urs2sww
5036    and this function.
5037
5038    Interpolating data that has an underlying gridded source can
5039    easily end up with different values, depending on the underlying
5040    mesh.
5041
5042    consider these 4 points
5043    50  -50
5044
5045    0     0
5046
5047    The grid can be
5048     -
5049    |\|   A
5050     -
5051     or;
5052      -
5053     |/|  B
5054      -
5055
    If a point is just below the midpoint, it will have a
5057    +ve value in grid A and a -ve value in grid B.
5058    """
5059
5060    from anuga.mesh_engine.mesh_engine import NoTrianglesError
5061    from anuga.pmesh.mesh import Mesh
5062
5063    files_in = [basename_in + WAVEHEIGHT_MUX_LABEL,
5064                basename_in + EAST_VELOCITY_LABEL,
5065                basename_in + NORTH_VELOCITY_LABEL]
5066    quantities = ['HA','UA','VA']
5067
5068    # instantiate urs_points of the three mux files.
5069    mux = {}
5070    for quantity, file in map(None, quantities, files_in):
5071        mux[quantity] = Urs_points(file)
5072
5073    # Could check that the depth is the same. (hashing)
5074
5075    # handle to a mux file to do depth stuff
5076    a_mux = mux[quantities[0]]
5077
5078    # Convert to utm
5079    lat = a_mux.lonlatdep[:,1]
5080    long = a_mux.lonlatdep[:,0]
5081    points_utm, zone = convert_from_latlon_to_utm(latitudes=lat,
5082                                                  longitudes=long)
5083
5084    elevation = a_mux.lonlatdep[:,2] * -1
5085
5086    # grid (create a mesh from the selected points)
    # This mesh has a problem.  Triangles are stretched over ungridded areas.
5088    # If these areas could be described as holes in pmesh, that would be great.
5089
    # I can't just get the user to select a point in the middle.
5091    # A boundary is needed around these points.
5092    # But if the zone of points is obvious enough auto-segment should do
5093    # a good boundary.
5094    mesh = Mesh()
5095    mesh.add_vertices(points_utm)
5096    mesh.auto_segment(smooth_indents=True, expand_pinch=True)
5097
5098    # To try and avoid alpha shape 'hugging' too much
5099    mesh.auto_segment(mesh.shape.get_alpha() * 1.1)
5100    if hole_points_UTM is not None:
5101        point = ensure_absolute(hole_points_UTM)
5102        mesh.add_hole(point[0], point[1])
5103
5104    try:
5105        mesh.generate_mesh(minimum_triangle_angle=0.0, verbose=False)
5106    except NoTrianglesError:
5107        # This is a bit of a hack, going in and changing the data structure.
5108        mesh.holes = []
5109        mesh.generate_mesh(minimum_triangle_angle=0.0, verbose=False)
5110
5111    mesh_dic = mesh.Mesh2MeshList()
5112
5113    #mesh.export_mesh_file(basename_in + '_168.tsh')
5114    #import sys; sys.exit()
5115    # These are the times of the mux file
5116    mux_times = []
5117    for i in range(a_mux.time_step_count):
5118        mux_times.append(a_mux.time_step * i)
5119    (mux_times_start_i, mux_times_fin_i) = mux2sww_time(mux_times, mint, maxt)
5120    times = mux_times[mux_times_start_i:mux_times_fin_i]
5121
5122    if mux_times_start_i == mux_times_fin_i:
5123        # Close the mux files
5124        for quantity, file in map(None, quantities, files_in):
5125            mux[quantity].close()
5126        msg = "Due to mint and maxt there's no time info in the boundary SWW."
5127        raise Exception, msg
5128
    # If this raise is removed there are currently no downstream errors
5130
    points_utm = ensure_numeric(points_utm)
    assert allclose(ensure_numeric(mesh_dic['generatedpointlist']),
                    ensure_numeric(points_utm))
5134
5135    volumes = mesh_dic['generatedtrianglelist']
5136
5137    # write sww intro and grid stuff.
5138    if basename_out is None:
5139        swwname = basename_in + '.sww'
5140    else:
5141        swwname = basename_out + '.sww'
5142
5143    if verbose: print 'Output to ', swwname
5144
5145    outfile = NetCDFFile(swwname, netcdf_mode_w)
5146
5147    # For a different way of doing this, check out tsh2sww
5148    # work out sww_times and the index range this covers
5149    sww = Write_sww()
5150    sww.store_header(outfile, times, len(volumes), len(points_utm),
5151                     verbose=verbose, sww_precision=Float)
5152    outfile.mean_stage = mean_stage
5153    outfile.zscale = zscale
5154
5155    sww.store_triangulation(outfile, points_utm, volumes,
5156                            elevation, zone,  new_origin=origin,
5157                            verbose=verbose)
5158
5159    if verbose: print 'Converting quantities'
5160
5161    # Read in a time slice from each mux file and write it to the SWW file
5162    j = 0
5163    for ha, ua, va in map(None, mux['HA'], mux['UA'], mux['VA']):
5164        if j >= mux_times_start_i and j < mux_times_fin_i:
5165            stage = zscale*ha + mean_stage
5166            h = stage - elevation
5167            xmomentum = ua*h
5168            ymomentum = -1 * va * h # -1 since in mux files south is positive.
5169            sww.store_quantities(outfile,
5170                                 slice_index=j-mux_times_start_i,
5171                                 verbose=verbose,
5172                                 stage=stage,
5173                                 xmomentum=xmomentum,
5174                                 ymomentum=ymomentum,
5175                                 sww_precision=Float)
5176        j += 1
5177
5178    if verbose: sww.verbose_quantities(outfile)
5179
5180    outfile.close()
5181
5183
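# Example (hypothetical file stem): convert 'project-z-mux', 'project-e-mux'
# and 'project-n-mux' into 'project_boundary.sww', keeping only the first
# hour of the time series:
#
#     urs_ungridded2sww(basename_in='project',
#                       basename_out='project_boundary',
#                       mint=0.0, maxt=3600.0, verbose=True)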
5184
5185################################################################################
5186# READ MUX2 FILES line of points
5187################################################################################
5188
5189WAVEHEIGHT_MUX2_LABEL = '-z-mux2'
5190EAST_VELOCITY_MUX2_LABEL = '-e-mux2'
5191NORTH_VELOCITY_MUX2_LABEL = '-n-mux2'
5192
5193##
# @brief Read mux2 files and combine them as a weighted sum of timeseries.
5195# @param filenames List of mux2 format input filenames.
5196# @param weights Weights associated with each source file.
5197# @param permutation The gauge numbers for which data is extracted.
5198# @param verbose True if this function is to be verbose.
5199# @return (times, latitudes, longitudes, elevation, quantity, starttime)
5200def read_mux2_py(filenames,
5201                 weights=None,
5202                 permutation=None,
5203                 verbose=False):
5204    """Access the mux files specified in the filenames list. Combine the
5205       data found therin as a weighted linear sum as specifed by the weights.
5206       If permutation is None or empty extract timeseries data for all gauges
5207       within the files.
5208
5209       Input:
5210           filenames:   List of filenames specifiying the file containing the
5211                        timeseries data (mux2 format) for each source
5212           weights:     Weighs associated with each source
5213                        (defaults to 1 for each source)
5214           permutation: Specifies the gauge numbers that for which data is to be
5215                        extracted
5216    """
5217
5218    from Numeric import ones,Float,compress,zeros,arange
5219    from urs_ext import read_mux2
5220
5221    numSrc = len(filenames)
5222
5223    file_params = -1 * ones(3, Float)                    # [nsta,dt,nt]
5224
5225    # Convert verbose to int C flag
5226    if verbose:
5227        verbose=1
5228    else:
5229        verbose=0
5230
5231    if weights is None:
5232        weights = ones(numSrc)
5233
5234    if permutation is None:
5235        permutation = ensure_numeric([], Float)
5236
5237    # Call underlying C implementation urs2sts_ext.c
5238    data = read_mux2(numSrc, filenames, weights, file_params,
5239                     permutation, verbose)
5240
    msg = 'File parameter values were not read in correctly from C file'
5242    assert len(compress(file_params > 0, file_params)) != 0, msg
5243
    msg = 'The number of stations specified in the C array and in the file ' \
          'are inconsistent'
5246    assert file_params[0] >= len(permutation), msg
5247
5248    msg = 'The number of stations returned is inconsistent with ' \
5249          'the requested number'
5250    assert len(permutation) == 0 or len(permutation) == data.shape[0], msg
5251
5252    nsta = int(file_params[0])
5253    msg = 'Must have at least one station'
5254    assert nsta > 0, msg
5255
5256    dt = file_params[1]
    msg = 'Must have a positive timestep'
5258    assert dt > 0, msg
5259
5260    nt = int(file_params[2])
5261    msg = 'Must have at least one gauge value'
5262    assert nt > 0, msg
5263
5264    OFFSET = 5 # Number of site parameters p passed back with data
5265               # p = [geolat,geolon,depth,start_tstep,finish_tstep]
5266
5267    # FIXME (Ole): What is the relationship with params and data.shape ?
5268    # It looks as if the following asserts should pass but they don't always
5269    #
5270    #msg = 'nt = %d, data.shape[1] == %d' %(nt, data.shape[1])
5271    #assert nt == data.shape[1] - OFFSET, msg
5272    #
5273    #msg = 'nsta = %d, data.shape[0] == %d' %(nsta, data.shape[0])
5274    #assert nsta == data.shape[0], msg
5275
5276    # Number of stations in ordering file
5277    number_of_selected_stations = data.shape[0]
5278
5279    # Index where data ends and parameters begin
5280    parameters_index = data.shape[1] - OFFSET
5281
5282    times = dt * arange(parameters_index)
5283    latitudes = zeros(number_of_selected_stations, Float)
5284    longitudes = zeros(number_of_selected_stations, Float)
5285    elevation = zeros(number_of_selected_stations, Float)
5286    quantity = zeros((number_of_selected_stations, parameters_index), Float)
5287
5288    starttime = 1e16
5289    for i in range(number_of_selected_stations):
5290        quantity[i][:] = data[i][:parameters_index]
5291        latitudes[i] = data[i][parameters_index]
5292        longitudes[i] = data[i][parameters_index+1]
5293        elevation[i] = -data[i][parameters_index+2]
5294        first_time_step = data[i][parameters_index+3]
5295        starttime = min(dt*first_time_step, starttime)
5296
5297    return times, latitudes, longitudes, elevation, quantity, starttime
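
# Example (hypothetical filenames): combine the wave-height timeseries of two
# sources with equal weights, extracting every gauge in the files:
#
#     times, lats, longs, elev, quantity, starttime = read_mux2_py(
#         ['sourceA-z-mux2', 'sourceB-z-mux2'],
#         weights=ensure_numeric([0.5, 0.5]))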
5298
5299
5300##
5301# @brief ??
5302# @param mux_times ??
5303# @param mint ??
5304# @param maxt ??
5305# @return ??
5306def mux2sww_time(mux_times, mint, maxt):
5307    """
5308    """
5309
    if mint is None:
5311        mux_times_start_i = 0
5312    else:
5313        mux_times_start_i = searchsorted(mux_times, mint)
5314
    if maxt is None:
5316        mux_times_fin_i = len(mux_times)
5317    else:
5318        maxt += 0.5 # so if you specify a time where there is
5319                    # data that time will be included
5320        mux_times_fin_i = searchsorted(mux_times, maxt)
5321
5322    return mux_times_start_i, mux_times_fin_i
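
# For example (a worked case): with mux_times == [0, 10, 20, 30], mint=5 and
# maxt=20, the 0.5 second slack means searchsorted sees 20.5, so the call
# returns (1, 3) and mux_times[1:3] == [10, 20] includes the maxt value itself:
#
#     mux2sww_time([0, 10, 20, 30], 5, 20)    # --> (1, 3)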
5323
5324
5325##
5326# @brief Convert a URS (mux2, wave propagation) file to an STS file.
5327# @param basename_in String (or list) of source file stems.
5328# @param basename_out Stem of output STS file path.
5329# @param weights
5330# @param verbose True if this function is to be verbose.
# @param origin Tuple with geo-ref UTM coords (zone, easting, northing).
5332# @param zone
5333# @param mean_stage
5334# @param zscale
5335# @param ordering_filename Path of a file specifying which mux2 gauge points are
5336#                          to be stored.
5337# @note Also convert latitude and longitude to UTM. All coordinates are
5338#       assumed to be given in the GDA94 datum.
5339def urs2sts(basename_in, basename_out=None,
5340            weights=None,
5341            verbose=False,
5342            origin=None,
5343            zone=None,
5344            mean_stage=0.0,
5345            zscale=1.0,
5346            ordering_filename=None):
5347    """Convert URS mux2 format for wave propagation to sts format
5348
5349    Also convert latitude and longitude to UTM. All coordinates are
5350    assumed to be given in the GDA94 datum
5351
5352    origin is a 3-tuple with geo referenced
5353    UTM coordinates (zone, easting, northing)
5354
5355    inputs:
5356
5357    basename_in: list of source file prefixes
5358
5359        These are combined with the extensions:
5360        WAVEHEIGHT_MUX2_LABEL = '-z-mux2' for stage
5361        EAST_VELOCITY_MUX2_LABEL = '-e-mux2' xmomentum
5362        NORTH_VELOCITY_MUX2_LABEL = '-n-mux2' and ymomentum
5363
        to create a 2D list of mux2 files. The rows are associated with each
        quantity and must have the above extensions;
        the columns are the list of file prefixes.
5367
5368    ordering: a .txt file name specifying which mux2 gauge points are
5369              to be stored. This is indicated by the index of the gauge
5370              in the ordering file.
5371
5372              ordering file format:
5373              1st line:    'index,longitude,latitude\n'
5374              other lines: index,longitude,latitude
5375
              If ordering is None or the ordering file is empty then
              all points are taken in the order they
              appear in the mux2 file.
5379
5380
5381    output:
5382      basename_out: name of sts file in which mux2 data is stored.
5383    """
5384
5385    import os
5386    from Scientific.IO.NetCDF import NetCDFFile
5387    from Numeric import Float, Int, Int32, searchsorted, zeros, array
    from Numeric import allclose, around, ones
5389    from types import ListType,StringType
5390    from operator import __and__
5391
5392    if not isinstance(basename_in, ListType):
5393        if verbose: print 'Reading single source'
5394        basename_in = [basename_in]
5395
5396    # This is the value used in the mux file format to indicate NAN data
5397    # FIXME (Ole): This should be changed everywhere to IEEE NAN when
5398    #              we upgrade to Numpy
5399    NODATA = 99
5400
5401    # Check that basename is a list of strings
5402    if not reduce(__and__, map(lambda z:isinstance(z,StringType), basename_in)):
5403        msg= 'basename_in must be a string or list of strings'
5404        raise Exception, msg
5405
5406    # Find the number of sources to be used
5407    numSrc = len(basename_in)
5408
5409    # A weight must be specified for each source
5410    if weights is None:
5411        # Default is equal weighting
5412        weights = ones(numSrc, Float) / numSrc
5413    else:
5414        weights = ensure_numeric(weights)
        msg = 'When combining multiple sources must specify a weight for ' \
              'each mux2 source file'
5417        assert len(weights) == numSrc, msg
5418
5419    if verbose: print 'Weights used in urs2sts:', weights
5420
5421    # Check output filename
5422    if basename_out is None:
5423        msg = 'STS filename must be specified as basename_out ' \
5424              'in function urs2sts'
5425        raise Exception, msg
5426
5427    if basename_out.endswith('.sts'):
5428        stsname = basename_out
5429    else:
5430        stsname = basename_out + '.sts'
5431
5432    # Create input filenames from basenames and check their existence
5433    files_in = [[], [], []]
5434    for files in basename_in:
5435        files_in[0].append(files + WAVEHEIGHT_MUX2_LABEL),
5436        files_in[1].append(files + EAST_VELOCITY_MUX2_LABEL)
5437        files_in[2].append(files + NORTH_VELOCITY_MUX2_LABEL)
5438
5439    quantities = ['HA','UA','VA'] # Quantity names used in the MUX2 format
5440    for i in range(len(quantities)):
5441        for file_in in files_in[i]:
5442            if (os.access(file_in, os.F_OK) == 0):
5443                msg = 'File %s does not exist or is not accessible' % file_in
5444                raise IOError, msg
5445
5446    # Establish permutation array
5447    if ordering_filename is not None:
5448        if verbose is True: print 'Reading ordering file', ordering_filename
5449
5450        # Read ordering file
5451        try:
5452            fid = open(ordering_filename, 'r')
5453            file_header = fid.readline().split(',')
5454            ordering_lines = fid.readlines()
5455            fid.close()
5456        except:
5457            msg = 'Cannot open %s' % ordering_filename
5458            raise Exception, msg
5459
5460        reference_header = 'index, longitude, latitude\n'
5461        reference_header_split = reference_header.split(',')
5462        for i in range(3):
5463            if not file_header[i].strip() == reference_header_split[i].strip():
5464                msg = 'File must contain header: ' + reference_header
5465                raise Exception, msg
5466
5467        if len(ordering_lines) < 2:
5468            msg = 'File must contain at least two points'
5469            raise Exception, msg
5470
5471        permutation = [int(line.split(',')[0]) for line in ordering_lines]
5472        permutation = ensure_numeric(permutation)
5473    else:
5474        permutation = None
5475
5476    # Read MUX2 files
    if verbose: print 'Reading mux2 files'
5478
5479    mux={}
5480    for i, quantity in enumerate(quantities):
        # For each quantity read the associated list of source mux2 files with
        # the extension associated with that quantity
5483
5484        times, latitudes, longitudes, elevation, mux[quantity], starttime \
5485            = read_mux2_py(files_in[i], weights, permutation, verbose=verbose)
5486
5487        # Check that all quantities have consistent time and space information
5488        if quantity != quantities[0]:
5489            msg = '%s, %s and %s have inconsistent gauge data' \
5490                  % (files_in[0], files_in[1], files_in[2])
5491            assert allclose(times, times_old), msg
5492            assert allclose(latitudes, latitudes_old), msg
5493            assert allclose(longitudes, longitudes_old), msg
5494            assert allclose(elevation, elevation_old), msg
5495            assert allclose(starttime, starttime_old), msg
5496        times_old = times
5497        latitudes_old = latitudes
5498        longitudes_old = longitudes
5499        elevation_old = elevation
5500        starttime_old = starttime
5501
5502        # Self check - can be removed to improve speed
5503        #ref_longitudes = [float(line.split(',')[1]) for line in ordering_lines]
5504        #ref_latitudes = [float(line.split(',')[2]) for line in ordering_lines]
5505        #
5506        #msg = 'Longitudes specified in ordering file do not match those ' \
5507        #      'found in mux files. ' \
5508        #      'I got %s instead of %s (only beginning shown)' \
5509        #      % (str(longitudes[:10]) + '...',
5510        #         str(ref_longitudes[:10]) + '...')
5511        #assert allclose(longitudes, ref_longitudes), msg
5512        #
5513        #msg = 'Latitudes specified in ordering file do not match those ' \
5514        #      'found in mux files. '
5515        #      'I got %s instead of %s (only beginning shown)' \
5516        #      % (str(latitudes[:10]) + '...',
5517        #         str(ref_latitudes[:10]) + '...')
5518        #assert allclose(latitudes, ref_latitudes), msg
5519
5520    # Store timeseries in STS file
    msg = 'File is empty and/or clipped region not in file region'
    assert len(latitudes) > 0, msg
5523
5524    number_of_points = latitudes.shape[0]      # Number of stations retrieved
5525    number_of_times = times.shape[0]           # Number of timesteps
5526    number_of_latitudes = latitudes.shape[0]   # Number latitudes
5527    number_of_longitudes = longitudes.shape[0] # Number longitudes
5528
    # The permutation vector contains the original indices as given in the
    # ordering file, or None, in which case points are assigned the trivial
    # indices enumerating them from 0 to number_of_points-1
5533    if permutation is None:
5534        permutation = arange(number_of_points, typecode=Int)
5535
5536    # NetCDF file definition
5537    outfile = NetCDFFile(stsname, netcdf_mode_w)
5538
5539    description = 'Converted from URS mux2 files: %s' % basename_in
5540
5541    # Create new file
5542    sts = Write_sts()
5543    sts.store_header(outfile,
5544                     times+starttime,
5545                     number_of_points,
5546                     description=description,
5547                     verbose=verbose,
5548                     sts_precision=Float)
5549
    # Store gauge positions as UTM eastings and northings
    # (redfearn is already imported at module level)
5552
5553    x = zeros(number_of_points, Float)  # Easting
5554    y = zeros(number_of_points, Float)  # Northing
5555
5556    # Check zone boundaries
5557    if zone is None:
5558        refzone, _, _ = redfearn(latitudes[0], longitudes[0])
5559    else:
5560        refzone = zone
5561
5562    old_zone = refzone
5563
5564    for i in range(number_of_points):
5565        zone, easting, northing = redfearn(latitudes[i], longitudes[i],
5566                                           zone=zone)
5567        x[i] = easting
5568        y[i] = northing
5569        if zone != refzone:
5570            msg = 'All sts gauges need to be in the same zone. \n'
            msg += 'offending gauge: Zone %d, %.4f, %.4f\n' \
                   % (zone, easting, northing)
            msg += 'previous gauge: Zone %d, %.4f, %.4f' \
                   % (old_zone, old_easting, old_northing)
5575            raise Exception, msg
5576        old_zone = zone
5577        old_easting = easting
5578        old_northing = northing
5579
5580    if origin is None:
5581        origin = Geo_reference(refzone, min(x), min(y))
5582    geo_ref = write_NetCDF_georeference(origin, outfile)
5583
5584    elevation = resize(elevation, outfile.variables['elevation'][:].shape)
5585    outfile.variables['permutation'][:] = permutation.astype(Int32) # Opteron 64
5586    outfile.variables['x'][:] = x - geo_ref.get_xllcorner()
5587    outfile.variables['y'][:] = y - geo_ref.get_yllcorner()
5588    outfile.variables['elevation'][:] = elevation
5589
5590    stage = outfile.variables['stage']
5591    xmomentum = outfile.variables['xmomentum']
5592    ymomentum = outfile.variables['ymomentum']
5593
5594    if verbose: print 'Converting quantities'
5595
5596    for j in range(len(times)):
5597        for i in range(number_of_points):
5598            ha = mux['HA'][i,j]
5599            ua = mux['UA'][i,j]
5600            va = mux['VA'][i,j]
5601            if ha == NODATA:
5602                if verbose:
5603                    msg = 'Setting nodata value %d to 0 at time = %f, ' \
5604                          'point = %d' % (ha, times[j], i)
5605                    print msg
5606                ha = 0.0
5607                ua = 0.0
5608                va = 0.0
5609
5610            w = zscale*ha + mean_stage
5611            h = w - elevation[i]
5612            stage[j,i] = w
5613
5614            xmomentum[j,i] = ua * h
5615            ymomentum[j,i] = va * h
5616
5617    outfile.close()
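
# Example (hypothetical stems and ordering file): combine two mux2 sources
# with equal weights into 'combined_boundary.sts', keeping only the gauges
# listed in 'ordering.txt':
#
#     urs2sts(['sourceA', 'sourceB'], basename_out='combined_boundary',
#             weights=[0.5, 0.5], ordering_filename='ordering.txt',
#             verbose=True)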
5618
5619
5620##
5621# @brief Create a list of points defining a boundary from an STS file.
5622# @param stsname Stem of path to the STS file to read.
5623# @return A list of boundary points.
5624def create_sts_boundary(stsname):
5625    """Create a list of points defining a boundary from an STS file.
5626
    Create boundary segments from a .sts file. Points can be stored in
    arbitrary order within the .sts file. The order in which the .sts points
    make up the boundary is given in the order.txt file.
5630
5631    FIXME:
5632    Point coordinates are stored in relative eastings and northings.
5633    But boundary is produced in absolute coordinates
5634    """
5635
    try:
        fid = NetCDFFile(stsname + '.sts', netcdf_mode_r)
    except:
        msg = 'Cannot open %s.sts' % stsname
        raise Exception, msg
5641
5642    xllcorner = fid.xllcorner[0]
5643    yllcorner = fid.yllcorner[0]
5644
5645    #Points stored in sts file are normalised to [xllcorner,yllcorner] but
5646    #we cannot assume that boundary polygon will be. At least the
5647    #additional points specified by the user after this function is called
5648    x = fid.variables['x'][:] + xllcorner
5649    y = fid.variables['y'][:] + yllcorner
5650
5651    x = reshape(x, (len(x),1))
5652    y = reshape(y, (len(y),1))
5653    sts_points = concatenate((x,y), axis=1)
5654
5655    return sts_points.tolist()
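
# Example (continuing the hypothetical urs2sts call above): read back the
# gauge locations as the seed of a boundary polygon:
#
#     boundary_points = create_sts_boundary('combined_boundary')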
5656
5657
5658##
5659# @brief A class to write an SWW file.
5660class Write_sww:
5661    from anuga.shallow_water.shallow_water_domain import Domain
5662
5663    # FIXME (Ole): Hardwiring the conserved quantities like
5664    # this could be a problem. I would prefer taking them from
5665    # the instantiation of Domain.
5666    #
5667    # (DSG) There is not always a Domain instance when Write_sww is used.
5668    # Check to see if this is the same level of hardwiring as is in
    # shallow water domain.
5670
5671    sww_quantities = Domain.conserved_quantities
5672
5673    RANGE = '_range'
5674    EXTREMA = ':extrema'
5675
5676    ##
    # @brief Instantiate the SWW writer class.
5678    def __init__(self):
5679        pass
5680
5681    ##
5682    # @brief Store a header in the SWW file.
5683    # @param outfile Open handle to the file that will be written.
5684    # @param times A list of time slices *or* a start time.
5685    # @param number_of_volumes The number of triangles.
5686    # @param number_of_points The number of points.
5687    # @param description The internal file description string.
5688    # @param smoothing True if smoothing is to be used.
5689    # @param order
    # @param sww_precision Data type of the quantity to be written (Float32).
5691    # @param verbose True if this function is to be verbose.
5692    # @note If 'times' is a list, the info will be made relative.
5693    def store_header(self,
5694                     outfile,
5695                     times,
5696                     number_of_volumes,
5697                     number_of_points,
5698                     description='Converted from XXX',
5699                     smoothing=True,
5700                     order=1,
5701                     sww_precision=Float32,
5702                     verbose=False):
5703        """Write an SWW file header.
5704
5705        outfile - the open file that will be written
5706        times - A list of the time slice times OR a start time
5707        Note, if a list is given the info will be made relative.
5708        number_of_volumes - the number of triangles
5709        """
5710
5711        outfile.institution = 'Geoscience Australia'
5712        outfile.description = description
5713
5714        # For sww compatibility
5715        if smoothing is True:
            # Smoothing to be deprecated
5717            outfile.smoothing = 'Yes'
5718            outfile.vertices_are_stored_uniquely = 'False'
5719        else:
            # Smoothing to be deprecated
5721            outfile.smoothing = 'No'
5722            outfile.vertices_are_stored_uniquely = 'True'
5723        outfile.order = order
5724
5725        try:
5726            revision_number = get_revision_number()
5727        except:
5728            revision_number = None
5729        # Allow None to be stored as a string
5730        outfile.revision_number = str(revision_number)
5731
        # This is being used to separate one number from a list.
        # What it is actually doing is telling lists and numeric arrays
        # apart from scalar values.
5734        if type(times) is list or type(times) is ArrayType:
5735            number_of_times = len(times)
5736            times = ensure_numeric(times)
5737            if number_of_times == 0:
5738                starttime = 0
5739            else:
5740                starttime = times[0]
5741                times = times - starttime  #Store relative times
5742        else:
5743            number_of_times = 0
5744            starttime = times
5745            #times = ensure_numeric([])
5746
5747        outfile.starttime = starttime
5748
5749        # dimension definitions
5750        outfile.createDimension('number_of_volumes', number_of_volumes)
5751        outfile.createDimension('number_of_vertices', 3)
5752        outfile.createDimension('numbers_in_range', 2)
5753
5754        if smoothing is True:
5755            outfile.createDimension('number_of_points', number_of_points)
5756            # FIXME(Ole): This will cause sww files for parallel domains to
5757            # have ghost nodes stored (but not used by triangles).
5758            # To clean this up, we have to change get_vertex_values and
5759            # friends in quantity.py (but I can't be bothered right now)
5760        else:
5761            outfile.createDimension('number_of_points', 3*number_of_volumes)
5762
5763        outfile.createDimension('number_of_timesteps', number_of_times)
5764
5765        # variable definitions
5766        outfile.createVariable('x', sww_precision, ('number_of_points',))
5767        outfile.createVariable('y', sww_precision, ('number_of_points',))
5768        outfile.createVariable('elevation', sww_precision,
5769                               ('number_of_points',))
5770        q = 'elevation'
5771        outfile.createVariable(q + Write_sww.RANGE, sww_precision,
5772                               ('numbers_in_range',))
5773
5774
5775        # Initialise ranges with small and large sentinels.
5776        # If this was in pure Python we could have used None sensibly
5777        outfile.variables[q+Write_sww.RANGE][0] = max_float  # Min
5778        outfile.variables[q+Write_sww.RANGE][1] = -max_float # Max
5779
5780        # FIXME: Backwards compatibility
5781        outfile.createVariable('z', sww_precision, ('number_of_points',))
5782
5783        outfile.createVariable('volumes', Int, ('number_of_volumes',
5784                                                'number_of_vertices'))
5785
5786        # Doing sww_precision instead of Float gives cast errors.
5787        outfile.createVariable('time', Float,
5788                               ('number_of_timesteps',))
5789
5790        for q in Write_sww.sww_quantities:
5791            outfile.createVariable(q, sww_precision, ('number_of_timesteps',
5792                                                      'number_of_points'))
5793            outfile.createVariable(q + Write_sww.RANGE, sww_precision,
5794                                   ('numbers_in_range',))
5795
5796            # Initialise ranges with small and large sentinels.
5797            # If this was in pure Python we could have used None sensibly
5798            outfile.variables[q+Write_sww.RANGE][0] = max_float  # Min
5799            outfile.variables[q+Write_sww.RANGE][1] = -max_float # Max
5800
5801        if type(times) is list or type(times) is ArrayType:
5802            outfile.variables['time'][:] = times    #Store time relative
5803
        # Only print time statistics when there is a time array to report on
        if verbose and type(times) is ArrayType and len(times.flat) > 0:
            print '------------------------------------------------'
            print 'Statistics:'
            print '    t in [%f, %f], len(t) == %d' \
                  % (min(times.flat), max(times.flat), len(times.flat))
5809
5810    ##
5811    # @brief Store triangulation data in the underlying file.
5812    # @param outfile Open handle to underlying file.
5813    # @param points_utm List or array of points in UTM.
    # @param volumes List of triangles, each a 3-tuple of point indices.
    # @param elevation Elevation values at the points.
    # @param zone UTM zone used when creating a default georeference.
5817    # @param new_origin georeference that the points can be set to.
5818    # @param points_georeference The georeference of the points_utm.
5819    # @param verbose True if this function is to be verbose.
5820    def store_triangulation(self,
5821                            outfile,
5822                            points_utm,
5823                            volumes,
5824                            elevation, zone=None, new_origin=None,
5825                            points_georeference=None, verbose=False):
5826        """
        new_origin - a georeference that the points can be set to. (Maybe
5828        do this before calling this function.)
5829
5830        points_utm - currently a list or array of the points in UTM.
5831        points_georeference - the georeference of the points_utm
5832
5833        How about passing new_origin and current_origin.
        If you get both, do a conversion from the old to the new.
5835
5836        If you only get new_origin, the points are absolute,
5837        convert to relative
5838
5839        if you only get the current_origin the points are relative, store
5840        as relative.
5841
5842        if you get no georefs create a new georef based on the minimums of
5843        points_utm.  (Another option would be to default to absolute)
5844
5845        Yes, and this is done in another part of the code.
5846        Probably geospatial.
5847
5848        If you don't supply either geo_refs, then supply a zone. If not
5849        the default zone will be used.
5850
5851        precon:
5852            header has been called.
5853        """
5854
5855        number_of_points = len(points_utm)
5856        volumes = array(volumes)
5857        points_utm = array(points_utm)
5858
5859        # given the two geo_refs and the points, do the stuff
5860        # described in the method header
5861        # if this is needed else where, pull out as a function
5862        points_georeference = ensure_geo_reference(points_georeference)
5863        new_origin = ensure_geo_reference(new_origin)
5864        if new_origin is None and points_georeference is not None:
5865            points = points_utm
5866            geo_ref = points_georeference
5867        else:
5868            if new_origin is None:
5869                new_origin = Geo_reference(zone, min(points_utm[:,0]),
5870                                                 min(points_utm[:,1]))
5871            points = new_origin.change_points_geo_ref(points_utm,
5872                                                      points_georeference)
5873            geo_ref = new_origin
5874
5875        # At this stage I need a georef and points
5876        # the points are relative to the georef
5877        geo_ref.write_NetCDF(outfile)
5878
5879        # This will put the geo ref in the middle
5880        #geo_ref = Geo_reference(refzone,(max(x)+min(x))/2.0,(max(x)+min(y))/2.)
5881
5882        x =  points[:,0]
5883        y =  points[:,1]
5884        z = outfile.variables['z'][:]
5885
5886        if verbose:
5887            print '------------------------------------------------'
5888            print 'More Statistics:'
            print '  Extent:'
            print '    x in [%f, %f], len(x) == %d' \
                  % (min(x), max(x), len(x))
            print '    y in [%f, %f], len(y) == %d' \
                  % (min(y), max(y), len(y))
            print '    z in [%f, %f], len(z) == %d' \
                  % (min(elevation), max(elevation), len(elevation))
5896            print 'geo_ref: ',geo_ref
5897            print '------------------------------------------------'
5898
5899        #z = resize(bath_grid, outfile.variables['z'][:].shape)
5900        outfile.variables['x'][:] = points[:,0] #- geo_ref.get_xllcorner()
5901        outfile.variables['y'][:] = points[:,1] #- geo_ref.get_yllcorner()
5902        outfile.variables['z'][:] = elevation
5903        outfile.variables['elevation'][:] = elevation  #FIXME HACK
5904        outfile.variables['volumes'][:] = volumes.astype(Int32) #On Opteron 64
5905
5906        q = 'elevation'
5907        # This updates the _range values
5908        outfile.variables[q + Write_sww.RANGE][0] = min(elevation)
5909        outfile.variables[q + Write_sww.RANGE][1] = max(elevation)
5910
5911
5912    ##
5913    # @brief Write the quantity data to the underlying file.
5914    # @param outfile Handle to open underlying file.
5915    # @param sww_precision Format of quantity data to write (default Float32).
    # @param slice_index Index into the time dimension at which to write.
    # @param time If given, extend the time variable and write at the new index.
5918    # @param verbose True if this function is to be verbose.
5919    # @param **quant
5920    def store_quantities(self, outfile, sww_precision=Float32,
5921                         slice_index=None, time=None,
5922                         verbose=False, **quant):
5923        """
5924        Write the quantity info.
5925
5926        **quant is extra keyword arguments passed in. These must be
5927          the sww quantities, currently; stage, xmomentum, ymomentum.
5928
        if the time array has already been built, use the slice_index
5930        to specify the index.
5931
5932        Otherwise, use time to increase the time dimension
5933
5934        Maybe make this general, but the viewer assumes these quantities,
5935        so maybe we don't want it general - unless the viewer is general
5936
5937        precon
5938        triangulation and
5939        header have been called.
5940        """
5941
5942        if time is not None:
5943            file_time = outfile.variables['time']
5944            slice_index = len(file_time)
5945            file_time[slice_index] = time
5946
5947        # Write the conserved quantities from Domain.
5948        # Typically stage,  xmomentum, ymomentum
5949        # other quantities will be ignored, silently.
5950        # Also write the ranges: stage_range,
5951        # xmomentum_range and ymomentum_range
5952        for q in Write_sww.sww_quantities:
5953            if not quant.has_key(q):
5954                msg = 'SWW file can not write quantity %s' % q
5955                raise NewQuantity, msg
5956            else:
5957                q_values = quant[q]
5958                outfile.variables[q][slice_index] = \
5959                                q_values.astype(sww_precision)
5960
5961                # This updates the _range values
5962                q_range = outfile.variables[q + Write_sww.RANGE][:]
5963                q_values_min = min(q_values)
5964                if q_values_min < q_range[0]:
5965                    outfile.variables[q + Write_sww.RANGE][0] = q_values_min
5966                q_values_max = max(q_values)
5967                if q_values_max > q_range[1]:
5968                    outfile.variables[q + Write_sww.RANGE][1] = q_values_max
5969
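    # Example (a sketch of the typical Write_sww call sequence; the data
    # values are hypothetical Numeric arrays):
    #
    #     sww = Write_sww()
    #     outfile = NetCDFFile('out.sww', netcdf_mode_w)
    #     sww.store_header(outfile, times=[0.0, 1.0],
    #                      number_of_volumes=1, number_of_points=3)
    #     sww.store_triangulation(outfile, points_utm, [[0, 1, 2]],
    #                             elevation, zone=50)
    #     sww.store_quantities(outfile, slice_index=0, stage=stage,
    #                          xmomentum=xmomentum, ymomentum=ymomentum)
    #     outfile.close()
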
5970    ##
5971    # @brief Print the quantities in the underlying file.
    # @param outfile Handle to the open file whose quantity ranges are printed.
5973    def verbose_quantities(self, outfile):
5974        print '------------------------------------------------'
5975        print 'More Statistics:'
5976        for q in Write_sww.sww_quantities:
            print '    %s in [%f, %f]' % (q,
                                          outfile.variables[q+Write_sww.RANGE][0],
                                          outfile.variables[q+Write_sww.RANGE][1])
5980        print '------------------------------------------------'
5981
5982
5983##
5984# @brief Obsolete?
5985# @param outfile
5986# @param has
5987# @param uas
5988# @param vas
5989# @param elevation
5990# @param mean_stage
5991# @param zscale
5992# @param verbose
5993def obsolete_write_sww_time_slices(outfile, has, uas, vas, elevation,
5994                                   mean_stage=0, zscale=1,
5995                                   verbose=False):
5996    #Time stepping
5997    stage = outfile.variables['stage']
5998    xmomentum = outfile.variables['xmomentum']
5999    ymomentum = outfile.variables['ymomentum']
6000
6001    n = len(has)
6002    j = 0
6003    for ha, ua, va in map(None, has, uas, vas):
6004        if verbose and j % ((n+10)/10) == 0: print '  Doing %d of %d' % (j, n)
6005        w = zscale*ha + mean_stage
6006        stage[j] = w
6007        h = w - elevation
6008        xmomentum[j] = ua * h
6009        ymomentum[j] = -1 * va * h  # -1 since in mux files south is positive.
6010        j += 1
6011
6012
6013##
6014# @brief Convert a set of URS files to a text file.
6015# @param basename_in Stem path to the 3 URS files.
# @param location_index Optional index of a single gauge to dump in full.
6017def urs2txt(basename_in, location_index=None):
6018    """
6019    Not finished or tested
6020    """
6021
6022    files_in = [basename_in + WAVEHEIGHT_MUX_LABEL,
6023                basename_in + EAST_VELOCITY_LABEL,
6024                basename_in + NORTH_VELOCITY_LABEL]
6025    quantities = ['HA','UA','VA']
6026
6027    d = ","
6028
6029    # instantiate urs_points of the three mux files.
6030    mux = {}
6031    for quantity, file in map(None, quantities, files_in):
6032        mux[quantity] = Urs_points(file)
6033
6034    # Could check that the depth is the same. (hashing)
6035
6036    # handle to a mux file to do depth stuff
6037    a_mux = mux[quantities[0]]
6038
6039    # Convert to utm
6040    latitudes = a_mux.lonlatdep[:,1]
6041    longitudes = a_mux.lonlatdep[:,0]
6042    points_utm, zone = \
6043        convert_from_latlon_to_utm(latitudes=latitudes, longitudes=longitudes)
6044    depths = a_mux.lonlatdep[:,2]
6045
6046    # open the output text file, start writing.
6047    fid = open(basename_in + '.txt', 'w')
6048
6049    fid.write("zone: " + str(zone) + "\n")
6050
6051    if location_index is not None:
6052        #Title
6053        li = location_index
6054        fid.write('location_index' + d + 'lat' + d + 'long' + d +
6055                  'Easting' + d + 'Northing' + '\n')
6056        fid.write(str(li) + d + str(latitudes[li]) + d +
6057                  str(longitudes[li]) + d + str(points_utm[li][0]) + d +
                  str(points_utm[li][1]) + '\n')
6059
6060    # the non-time dependent stuff
6061    #Title
6062    fid.write('location_index' + d + 'lat' + d + 'long' + d +
6063              'Easting' + d + 'Northing' + d + 'depth m' + '\n')
6064    i = 0
6065    for depth, point_utm, lat, long in map(None, depths, points_utm,
6066                                           latitudes, longitudes):
6067
6068        fid.write(str(i) + d + str(lat) + d + str(long) + d +
                  str(point_utm[0]) + d + str(point_utm[1]) + d +
6070                  str(depth) + '\n')
6071        i += 1
6072
6073    #Time dependent
6074    if location_index is not None:
6075        time_step = a_mux.time_step
6076        i = 0
6077        #Title
6078        fid.write('time' + d + 'HA depth m' + d + 'UA momentum East x m/sec' +
6079                  d + 'VA momentum North y m/sec' + '\n')
6080        for HA, UA, VA in map(None, mux['HA'], mux['UA'], mux['VA']):
6081            fid.write(str(i*time_step) + d + str(HA[location_index]) + d +
6082                      str(UA[location_index]) + d +
6083                      str(VA[location_index]) + '\n')
6084            i += 1
6085
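# Example usage sketch for urs2txt (assuming the files 'project-z-mux',
# 'project-e-mux' and 'project-n-mux' exist, i.e. the stem 'project'
# plus the standard mux suffixes):
#
#     urs2txt('project', location_index=0)
#
# This would write 'project.txt' containing the UTM zone, one row of
# lat/long/Easting/Northing/depth per gauge and, since a location_index
# was given, a time series of HA, UA and VA for gauge 0.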

##
# @brief A class to write STS files.
class Write_sts:
    sts_quantities = ['stage','xmomentum','ymomentum']
    RANGE = '_range'
    EXTREMA = ':extrema'

    ##
    # @brief Instantiate this STS writer class.
    def __init__(self):
        pass

    ##
    # @brief Write a header to the underlying data file.
    # @param outfile Handle to open file to write.
    # @param times A list of the time slice times *or* a start time.
    # @param number_of_points The number of URS gauge sites.
    # @param description Description string to write into the STS file.
    # @param sts_precision Format of data to write (default Float32).
    # @param verbose True if this function is to be verbose.
    # @note If 'times' is a list, the info will be made relative.
    def store_header(self,
                     outfile,
                     times,
                     number_of_points,
                     description='Converted from URS mux2 format',
                     sts_precision=Float32,
                     verbose=False):
        """
        outfile - handle to the open NetCDF file that will be written to
        times - A list of the time slice times OR a start time
        Note, if a list is given the info will be made relative.
        number_of_points - the number of URS gauge sites
        """

        outfile.institution = 'Geoscience Australia'
        outfile.description = description

        try:
            revision_number = get_revision_number()
        except:
            revision_number = None

        # Allow None to be stored as a string
        outfile.revision_number = str(revision_number)

        # Start time in seconds since the epoch (midnight 1/1/1970).
        # The type test below distinguishes a list (or numeric array)
        # of time slices from a single start time.
        if type(times) is list or type(times) is ArrayType:
            number_of_times = len(times)
            times = ensure_numeric(times)
            if number_of_times == 0:
                starttime = 0
            else:
                starttime = times[0]
                times = times - starttime  # Store relative times
        else:
            number_of_times = 0
            starttime = times

        outfile.starttime = starttime

        # Dimension definitions
        outfile.createDimension('number_of_points', number_of_points)
        outfile.createDimension('number_of_timesteps', number_of_times)
        outfile.createDimension('numbers_in_range', 2)

        # Variable definitions
        outfile.createVariable('permutation', Int, ('number_of_points',))
        outfile.createVariable('x', sts_precision, ('number_of_points',))
        outfile.createVariable('y', sts_precision, ('number_of_points',))
        outfile.createVariable('elevation', sts_precision,
                               ('number_of_points',))

        q = 'elevation'
        outfile.createVariable(q + Write_sts.RANGE, sts_precision,
                               ('numbers_in_range',))

        # Initialise ranges with small and large sentinels.
        # If this was in pure Python we could have used None sensibly
        outfile.variables[q + Write_sts.RANGE][0] = max_float  # Min
        outfile.variables[q + Write_sts.RANGE][1] = -max_float # Max

        # Doing sts_precision instead of Float gives cast errors.
        outfile.createVariable('time', Float, ('number_of_timesteps',))

        for q in Write_sts.sts_quantities:
            outfile.createVariable(q, sts_precision, ('number_of_timesteps',
                                                      'number_of_points'))
            outfile.createVariable(q + Write_sts.RANGE, sts_precision,
                                   ('numbers_in_range',))
            # Initialise ranges with small and large sentinels.
            # If this was in pure Python we could have used None sensibly
            outfile.variables[q + Write_sts.RANGE][0] = max_float  # Min
            outfile.variables[q + Write_sts.RANGE][1] = -max_float # Max

        if type(times) is list or type(times) is ArrayType:
            outfile.variables['time'][:] = times    # Store time relative

        if verbose:
            print '------------------------------------------------'
            print 'Statistics:'
            print '    t in [%f, %f], len(t) == %d' \
                  % (min(times.flat), max(times.flat), len(times.flat))

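    # Example sketch of how the 'times' argument is interpreted (here
    # 'fid' and 'fid2' stand for open, writable NetCDF file handles):
    #
    #     w = Write_sts()
    #     w.store_header(fid, times=[3600.0, 3660.0, 3720.0],
    #                    number_of_points=10)
    #     # fid.starttime is 3600.0 and the 'time' variable holds the
    #     # relative times [0.0, 60.0, 120.0].
    #
    #     w.store_header(fid2, times=3600.0, number_of_points=10)
    #     # A single start time: fid2.starttime is 3600.0 and no time
    #     # slices are stored yet.
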
    ##
    # @brief Store points and elevation in the underlying STS file.
    # @param outfile Handle to open NetCDF file to write.
    # @param points_utm List or array of the points in UTM.
    # @param elevation Elevation (depth) at each point.
    # @param zone UTM zone of the points.
    # @param new_origin Georeference to convert the points to.
    # @param points_georeference Georeference of the supplied points.
    # @param verbose True if this function is to be verbose.
    def store_points(self,
                     outfile,
                     points_utm,
                     elevation, zone=None, new_origin=None,
                     points_georeference=None, verbose=False):

        """
        points_utm - currently a list or array of the points in UTM.
        points_georeference - the georeference of the points_utm

        The georeferencing is resolved as follows:

        If both new_origin and points_georeference are given, the points
        are converted from the old georeference to the new one.

        If only new_origin is given, the points are absolute and are
        converted to be relative to the new origin.

        If only points_georeference is given, the points are already
        relative and are stored as such.

        If neither is given, a new georeference is created from the
        minima of points_utm.  (Another option would be to default to
        absolute; that is done in another part of the code, probably
        in geospatial_data.)

        If no georeference is supplied, a zone should be given; otherwise
        the default zone will be used.

        precondition:
             header has been called.
        """

        number_of_points = len(points_utm)
        points_utm = array(points_utm)

        # Given the two geo_refs and the points, do the stuff
        # described in the method docstring.
        points_georeference = ensure_geo_reference(points_georeference)
        new_origin = ensure_geo_reference(new_origin)

        if new_origin is None and points_georeference is not None:
            points = points_utm
            geo_ref = points_georeference
        else:
            if new_origin is None:
                new_origin = Geo_reference(zone, min(points_utm[:,0]),
                                                 min(points_utm[:,1]))
            points = new_origin.change_points_geo_ref(points_utm,
                                                      points_georeference)
            geo_ref = new_origin

        # At this stage we need a georef and points;
        # the points are relative to the georef.
        geo_ref.write_NetCDF(outfile)

        x = points[:,0]
        y = points[:,1]
        z = outfile.variables['z'][:]

        if verbose:
            print '------------------------------------------------'
            print 'More Statistics:'
            print '  Extent (lat/lon):'
            print '    x in [%f, %f], len(lat) == %d' \
                  % (min(x), max(x), len(x))
            print '    y in [%f, %f], len(lon) == %d' \
                  % (min(y), max(y), len(y))
            print '    z in [%f, %f], len(z) == %d' \
                  % (min(elevation), max(elevation), len(elevation))
            print 'geo_ref: ', geo_ref
            print '------------------------------------------------'

        #z = resize(bath_grid, outfile.variables['z'][:].shape)
        outfile.variables['x'][:] = points[:,0] #- geo_ref.get_xllcorner()
        outfile.variables['y'][:] = points[:,1] #- geo_ref.get_yllcorner()
        outfile.variables['z'][:] = elevation
        outfile.variables['elevation'][:] = elevation  #FIXME HACK4

        # This updates the _range values
        q = 'elevation'
        outfile.variables[q + Write_sts.RANGE][0] = min(elevation)
        outfile.variables[q + Write_sts.RANGE][1] = max(elevation)

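    # Example sketch of the georeferencing cases described in the
    # docstring (the zone and coordinates are made up for illustration):
    #
    #     # Both origins given: convert points to the new origin.
    #     w.store_points(fid, points_utm, elevation,
    #                    new_origin=Geo_reference(56, 100000.0, 8000000.0),
    #                    points_georeference=old_geo_ref)
    #
    #     # Only points_georeference: points are already relative; store as is.
    #     w.store_points(fid, points, elevation,
    #                    points_georeference=old_geo_ref)
    #
    #     # No georeferences: an origin is derived from the minima of
    #     # points_utm, so a zone should be supplied.
    #     w.store_points(fid, points_utm, elevation, zone=56)
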
    ##
    # @brief Store quantity data in the underlying file.
    # @param outfile Handle to open NetCDF file to write.
    # @param sts_precision Format of data to write (default Float32).
    # @param slice_index Index into the time dimension to write to.
    # @param time Time value to append to the time dimension.
    # @param verbose True if this function is to be verbose.
    # @param **quant Extra keyword args.
    def store_quantities(self, outfile, sts_precision=Float32,
                         slice_index=None, time=None,
                         verbose=False, **quant):
        """Write the quantity info.

        **quant is extra keyword arguments passed in. These must be
          the sts quantities, currently: stage, xmomentum and ymomentum.

        If the time array has already been built, use slice_index
        to specify the index to write to.

        Otherwise, use time to extend the time dimension.

        Maybe make this general, but the viewer assumes these quantities,
        so maybe we don't want it general - unless the viewer is general.

        precondition:
            triangulation and header have been called.
        """

        if time is not None:
            file_time = outfile.variables['time']
            slice_index = len(file_time)
            file_time[slice_index] = time

        # Write the conserved quantities from Domain.
        # Typically stage, xmomentum, ymomentum.
        # Other quantities will be ignored, silently.
        # Also write the ranges: stage_range, etc.
        for q in Write_sts.sts_quantities:
            if not quant.has_key(q):
                msg = 'STS file can not write quantity %s' % q
                raise NewQuantity, msg
            else:
                q_values = quant[q]
                outfile.variables[q][slice_index] = \
                                q_values.astype(sts_precision)

                # This updates the _range values
                q_range = outfile.variables[q + Write_sts.RANGE][:]
                q_values_min = min(q_values)
                if q_values_min < q_range[0]:
                    outfile.variables[q + Write_sts.RANGE][0] = q_values_min
                q_values_max = max(q_values)
                if q_values_max > q_range[1]:
                    outfile.variables[q + Write_sts.RANGE][1] = q_values_max

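    # Example sketch of writing time slices (assuming store_header and
    # store_points have been called on 'fid' and that stage, xmom and
    # ymom are Numeric arrays with one value per point):
    #
    #     w.store_quantities(fid, time=120.0,
    #                        stage=stage, xmomentum=xmom, ymomentum=ymom)
    #
    # Passing 'time' appends a new entry to the 'time' variable and
    # writes at that new index; alternatively, an existing slot can be
    # overwritten directly via slice_index:
    #
    #     w.store_quantities(fid, slice_index=0,
    #                        stage=stage, xmomentum=xmom, ymomentum=ymom)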


##
# @brief A class to read URS mux files.
class Urs_points:
    """
    Read the info in URS mux files.

    For the quantities, here's the correlation between the file names
    and what they mean:
    z-mux is height above sea level, m
    e-mux is velocity in the Eastern direction, m/s
    n-mux is velocity in the Northern direction, m/s
    """
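
    # Example usage sketch (assuming 'project-z-mux' is an existing URS
    # wave height file):
    #
    #     mux = Urs_points('project-z-mux')
    #     print mux.points_num, mux.time_step_count, mux.time_step
    #     for ha in mux:       # one array of values per time step
    #         print ha[0]      # wave height at the first gauge, m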

    ##
    # @brief Initialize this instance of Urs_points.
    # @param urs_file Path to the underlying data file.
    def __init__(self, urs_file):
        self.iterated = False
        columns = 3                         # long, lat, depth
        mux_file = open(urs_file, 'rb')

        # Number of points/stations
        (self.points_num,) = unpack('i', mux_file.read(4))

        # nt, int - Number of time steps
        (self.time_step_count,) = unpack('i', mux_file.read(4))
        # dt, float - time step, seconds
        (self.time_step,) = unpack('f', mux_file.read(4))
        msg = "Bad data in the urs file."
        if self.points_num < 0:
            mux_file.close()
            raise ANUGAError, msg
        if self.time_step_count < 0: