source: anuga_core/source/anuga/shallow_water/data_manager.py @ 5880

Last change on this file since 5880 was 5880, checked in by ole, 15 years ago

Added test for platform problem with read_mux.
This one passes on Linux.

File size: 229.9 KB
1"""datamanager.py - input output for AnuGA
2
3
4This module takes care of reading and writing datafiles such as topographies,
5model output, etc.
6
7
8Formats used within AnuGA:
9
10.sww: Netcdf format for storing model output f(t,x,y)
11.tms: Netcdf format for storing time series f(t)
12
13.csv: ASCII format for storing arbitrary points and associated attributes
14.pts: NetCDF format for storing arbitrary points and associated attributes
15
16.asc: ASCII format of regular DEMs as output from ArcView
17.prj: Associated ArcView file giving more meta data for asc format
18.ers: ERMapper header format of regular DEMs for ArcView
19
20.dem: NetCDF representation of regular DEM data
21
22.tsh: ASCII format for storing meshes and associated boundary and region info
23.msh: NetCDF format for storing meshes and associated boundary and region info
24
25.nc: Native ferret NetCDF format
26.geo: Houdini's ASCII geometry format (?)
27
28
29A typical dataflow can be described as follows
30
31Manually created files:
32ASC, PRJ:     Digital elevation models (gridded)
33TSH:          Triangular meshes (e.g. created from anuga.pmesh)
34NC:           Model outputs for use as boundary conditions (e.g. from MOST)
35
36
37AUTOMATICALLY CREATED FILES:
38
39ASC, PRJ  ->  DEM  ->  PTS: Conversion of DEMs to native pts file
40
41NC -> SWW: Conversion of MOST boundary files to boundary sww
42
43PTS + TSH -> TSH with elevation: Least squares fit
44
45TSH -> SWW:  Conversion of TSH to sww viewable using Swollen
46
47TSH + Boundary SWW -> SWW: Simulation using abstract_2d_finite_volumes
48
49"""
50
51# This file was reverted from changeset:5484 to changeset:5470 on 10th July
52# by Ole.
53
54import exceptions
55class TitleValueError(exceptions.Exception): pass
56class DataMissingValuesError(exceptions.Exception): pass
57class DataFileNotOpenError(exceptions.Exception): pass
58class DataTimeError(exceptions.Exception): pass
59class DataDomainError(exceptions.Exception): pass
60class NewQuantity(exceptions.Exception): pass
61
62
63
64import csv
65import os, sys
66import shutil
67from struct import unpack
68import array as p_array
69#import time, os
70from os import sep, path, remove, mkdir, access, F_OK, W_OK, getcwd
71
72
73from Numeric import concatenate, array, Float, Int, Int32, resize, \
74     sometrue, searchsorted, zeros, allclose, around, reshape, \
75     transpose, sort, NewAxis, ArrayType, compress, take, arange, \
76     argmax, alltrue, shape, Float32, size
77
78import string
79
80from Scientific.IO.NetCDF import NetCDFFile
81#from shutil import copy
82from os.path import exists, basename, join
83from os import getcwd
84
85
86from anuga.coordinate_transforms.redfearn import redfearn, \
87     convert_from_latlon_to_utm
88from anuga.coordinate_transforms.geo_reference import Geo_reference, \
89     write_NetCDF_georeference, ensure_geo_reference
90from anuga.geospatial_data.geospatial_data import Geospatial_data,\
91     ensure_absolute
92from anuga.config import minimum_storable_height as default_minimum_storable_height
93from anuga.config import max_float
94from anuga.utilities.numerical_tools import ensure_numeric,  mean
95from anuga.caching.caching import myhash
96from anuga.utilities.anuga_exceptions import ANUGAError
97from anuga.shallow_water import Domain
98from anuga.abstract_2d_finite_volumes.pmesh2domain import \
99     pmesh_to_domain_instance
100from anuga.abstract_2d_finite_volumes.util import get_revision_number, \
101     remove_lone_verts, sww2timeseries, get_centroid_values
102     
103from anuga.abstract_2d_finite_volumes.neighbour_mesh import segment_midpoints
104from anuga.load_mesh.loadASCII import export_mesh_file
105from anuga.utilities.polygon import intersection
106
107
108# formula mappings
109
110quantity_formula = {'momentum':'(xmomentum**2 + ymomentum**2)**0.5',
111                    'depth':'stage-elevation',
112                    'speed': \
113 '(xmomentum**2 + ymomentum**2)**0.5/(stage-elevation+1.e-6/(stage-elevation))'}
114
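# A minimal sketch of how a formula string above could be evaluated
# (hypothetical helper, not part of the original module).  The names in the
# formula are bound to per-vertex Numeric arrays via eval's namespace, and
# the arithmetic is applied element-wise.
def _example_evaluate_formula(formula, stage, elevation, xmomentum, ymomentum):
    namespace = {'stage': stage, 'elevation': elevation,
                 'xmomentum': xmomentum, 'ymomentum': ymomentum}
    return eval(formula, namespace)
# e.g. depth = _example_evaluate_formula(quantity_formula['depth'],
#                                        stage, elevation, xmom, ymom)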
115
116   
117def make_filename(s):
118    """Transform argument string into a Sexsuitable filename
119    """
120
121    s = s.strip()
122    s = s.replace(' ', '_')
123    s = s.replace('(', '')
124    s = s.replace(')', '')
125    s = s.replace('__', '_')
126
127    return s
128
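# Usage sketch (the input string is hypothetical):
#
#     >>> make_filename(' Perth run (test 1) ')
#     'Perth_run_test_1'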
129
130def check_dir(path, verbose=None):
131    """Check that specified path exists.
132    If path does not exist it will be created if possible
133
134    USAGE:
135       check_dir(path, verbose)
136
137    ARGUMENTS:
138        path -- Directory
139        verbose -- Flag verbose output (default: None)
140
141    RETURN VALUE:
142        Verified path including trailing separator
143
144    """
145
146    import os.path
147
148    if sys.platform in ['nt', 'dos', 'win32', 'what else?']:
149        unix = 0
150    else:
151        unix = 1
152
153
154    if path[-1] != os.sep:
155        path = path + os.sep  # Add separator for directories
156
157    path = os.path.expanduser(path) # Expand ~ or ~user in pathname
158    if not (os.access(path, os.R_OK | os.W_OK) or path == ''):
159        try:
160            exitcode=os.mkdir(path)
161
162            # Change access rights if possible
163            #
164            if unix:
165                exitcode=os.system('chmod 775 '+path)
166            else:
167                pass  # FIXME: What about access rights under Windows?
168
169            if verbose: print 'MESSAGE: Directory', path, 'created.'
170
171        except:
172            print 'WARNING: Directory', path, 'could not be created.'
173            if unix:
174                path = '/tmp/'
175            else:
176                path = 'C:'
177
178            print 'Using directory %s instead' %path
179
180    return(path)
181
182
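# Usage sketch (the directory name is hypothetical; assumes a writable
# filesystem):
#
#     >>> datadir = check_dir('~/anuga_output', verbose=True)
#     >>> datadir.endswith(os.sep)
#     True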
183
184def del_dir(path):
185    """Recursively delete directory path and all its contents
186    """
187
188    import os
189
190    if os.path.isdir(path):
191        for file in os.listdir(path):
192            X = os.path.join(path, file)
193
194
195            if os.path.isdir(X) and not os.path.islink(X):
196                del_dir(X)
197            else:
198                try:
199                    os.remove(X)
200                except:
201                    print "Could not remove file %s" %X
202
203        os.rmdir(path)
204       
205       
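# Note: the standard library provides equivalent behaviour; a minimal
# alternative using shutil (already imported above) would be
#
#     shutil.rmtree(path, ignore_errors=True)
#
# del_dir is kept because it warns about each file it cannot remove.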
206# ANOTHER OPTION, IF NEED IN THE FUTURE, Nick B 7/2007   
207def rmgeneric(path, __func__,verbose=False):
208    ERROR_STR= """Error removing %(path)s, %(error)s """
209
210    try:
211        __func__(path)
212        if verbose: print 'Removed ', path
213    except OSError, (errno, strerror):
214        print ERROR_STR % {'path' : path, 'error': strerror }
215           
216def removeall(path,verbose=False):
217
218    if not os.path.isdir(path):
219        return
220   
221    files=os.listdir(path)
222
223    for x in files:
224        fullpath=os.path.join(path, x)
225        if os.path.isfile(fullpath):
226            f=os.remove
227            rmgeneric(fullpath, f, verbose)
228        elif os.path.isdir(fullpath):
229            removeall(fullpath)
230            f=os.rmdir
231            rmgeneric(fullpath, f,verbose)
232
233
234
235def create_filename(datadir, filename, format, size=None, time=None):
236
237    import os
238    #from anuga.config import data_dir
239
240    FN = check_dir(datadir) + filename
241
242    if size is not None:
243        FN += '_size%d' %size
244
245    if time is not None:
246        FN += '_time%.2f' %time
247
248    FN += '.' + format
249    return FN
250
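# Usage sketch (directory and name are hypothetical; result shown for a
# Unix-style platform):
#
#     >>> create_filename('.', 'perth', 'sww', size=4, time=1.5)
#     './perth_size4_time1.50.sww'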
251
252def get_files(datadir, filename, format, size):
253    """Get all file (names) with given name, size and format
254    """
255
256    import glob
257
258    import os
259    #from anuga.config import data_dir
260
261    dir = check_dir(datadir)
262
263    pattern = dir + filename + '_size%d*.%s' %(size, format) # match create_filename
264    return glob.glob(pattern)
265
266
267
268#Generic class for storing output to e.g. visualisation or checkpointing
269class Data_format:
270    """Generic interface to data formats
271    """
272
273
274    def __init__(self, domain, extension, mode = 'w'):
275        assert mode in ['r', 'w', 'a'], '''Mode %s must be either:''' %mode +\
276                                        '''   'w' (write)'''+\
277                                        '''   'r' (read)''' +\
278                                        '''   'a' (append)'''
279
280        #Create filename
281        self.filename = create_filename(domain.get_datadir(),
282                                        domain.get_name(), extension)
283
284        #print 'F', self.filename
285        self.timestep = 0
286        self.domain = domain
287       
288
289
290        # Exclude ghosts in case this is a parallel domain
291        self.number_of_nodes = domain.number_of_full_nodes       
292        self.number_of_volumes = domain.number_of_full_triangles
293        #self.number_of_volumes = len(domain)       
294
295
296
297
298        #FIXME: Should we have a general set_precision function?
299
300
301
302#Class for storing output to e.g. visualisation
303class Data_format_sww(Data_format):
304    """Interface to native NetCDF format (.sww) for storing model output
305
306    There are two kinds of data
307
308    1: Constant data: Vertex coordinates and field values. Stored once
309    2: Variable data: Conserved quantities. Stored once per timestep.
310
311    All data is assumed to reside at vertex locations.
312    """
313
314
315    def __init__(self, domain, mode = 'w',\
316                 max_size = 2000000000,
317                 recursion = False):
318        from Scientific.IO.NetCDF import NetCDFFile
319        from Numeric import Int, Float, Float32
320
321        self.precision = Float32 #Use single precision for quantities
322        if hasattr(domain, 'max_size'):
323            self.max_size = domain.max_size #file size max is 2Gig
324        else:
325            self.max_size = max_size
326        self.recursion = recursion
327        self.mode = mode
328
329        Data_format.__init__(self, domain, 'sww', mode)
330
331        if hasattr(domain, 'minimum_storable_height'):
332            self.minimum_storable_height = domain.minimum_storable_height
333        else:
334            self.minimum_storable_height = default_minimum_storable_height
335
336        # NetCDF file definition
337        fid = NetCDFFile(self.filename, mode)
338
339        if mode == 'w':
340            description = 'Output from anuga.abstract_2d_finite_volumes suitable for plotting'
341            self.writer = Write_sww()
342            self.writer.store_header(fid,
343                                     domain.starttime,
344                                     self.number_of_volumes,
345                                     self.domain.number_of_full_nodes,
346                                     description=description,
347                                     smoothing=domain.smooth,
348                                     order=domain.default_order,
349                                     sww_precision=self.precision)
350
351            # Extra optional information
352            if hasattr(domain, 'texture'):
353                fid.texture = domain.texture
354
355            if domain.quantities_to_be_monitored is not None:
356                fid.createDimension('singleton', 1)
357                fid.createDimension('two', 2)               
358
359                poly = domain.monitor_polygon
360                if poly is not None:
361                    N = len(poly)
362                    fid.createDimension('polygon_length', N)
363                    fid.createVariable('extrema.polygon',
364                                       self.precision,
365                                       ('polygon_length',
366                                        'two'))
367                    fid.variables['extrema.polygon'][:] = poly                                   
368
369                   
370                interval = domain.monitor_time_interval
371                if interval is not None:
372                    fid.createVariable('extrema.time_interval',
373                                       self.precision,
374                                       ('two',))
375                    fid.variables['extrema.time_interval'][:] = interval
376
377               
378                for q in domain.quantities_to_be_monitored:
379                    #print 'doing', q
380                    fid.createVariable(q+'.extrema', self.precision,
381                                       ('numbers_in_range',))
382                    fid.createVariable(q+'.min_location', self.precision,
383                                       ('numbers_in_range',))
384                    fid.createVariable(q+'.max_location', self.precision,
385                                       ('numbers_in_range',))
386                    fid.createVariable(q+'.min_time', self.precision,
387                                       ('singleton',))
388                    fid.createVariable(q+'.max_time', self.precision,
389                                       ('singleton',))
390
391                   
392        fid.close()
393
394
395    def store_connectivity(self):
396        """Specialisation of store_connectivity for net CDF format
397
398        Writes x,y,z coordinates of triangles constituting
399        the bed elevation.
400        """
401
402        from Scientific.IO.NetCDF import NetCDFFile
403
404        from Numeric import concatenate, Int
405
406        domain = self.domain
407
408        #Get NetCDF
409        fid = NetCDFFile(self.filename, 'a')  #Open existing file for append
410
411        # Get the variables
412        x = fid.variables['x']
413        y = fid.variables['y']
414        z = fid.variables['elevation']
415
416        volumes = fid.variables['volumes']
417
418        # Get X, Y and bed elevation Z
419        Q = domain.quantities['elevation']
420        X,Y,Z,V = Q.get_vertex_values(xy=True,
421                                      precision=self.precision)
422
423        #
424        points = concatenate( (X[:,NewAxis],Y[:,NewAxis]), axis=1 )
425        self.writer.store_triangulation(fid,
426                                        points,
427                                        V.astype(volumes.typecode()),
428                                        Z,
429                                        points_georeference= \
430                                        domain.geo_reference)
431
432        # Close
433        fid.close()
434
435
436    def store_timestep(self, names=None):
437        """Store time and named quantities to file
438        """
439       
440        from Scientific.IO.NetCDF import NetCDFFile
441        import types
442        from time import sleep
443        from os import stat
444
445        from Numeric import choose
446
447
448        if names is None:
449            # Standard shallow water wave equation quantities in ANUGA
450            names = ['stage', 'xmomentum', 'ymomentum']
451       
452        # Get NetCDF       
453        retries = 0
454        file_open = False
455        while not file_open and retries < 10:
456            try:
457                fid = NetCDFFile(self.filename, 'a') # Open existing file
458            except IOError:
459                # This could happen if someone was reading the file.
460                # In that case, wait a while and try again
461                msg = 'Warning (store_timestep): File %s could not be opened'\
462                      %self.filename
463                msg += ' - trying step %s again' %self.domain.time
464                print msg
465                retries += 1
466                sleep(1)
467            else:
468                file_open = True
469
470        if not file_open:
471            msg = 'File %s could not be opened for append' %self.filename
472            raise DataFileNotOpenError, msg
473
474
475
476        # Check to see if the file is already too big:
477        time = fid.variables['time']
478        i = len(time)+1
479        file_size = stat(self.filename)[6]
480        file_size_increase =  file_size/i
481        if file_size + file_size_increase > self.max_size*(2**self.recursion):
482            # In order to get the file name and start time correct,
483            # I change the domain.filename and domain.starttime.
484            # This is the only way to do this without changing
485            # other modules (I think).
486
487            # Write a filename add-on that won't break Swollen's reader
488            # (10.sww is bad)
489            filename_ext = '_time_%s'%self.domain.time
490            filename_ext = filename_ext.replace('.', '_')
491           
492            # Remember the old filename, then give domain a
493            # name with the extension
494            old_domain_filename = self.domain.get_name()
495            if not self.recursion:
496                self.domain.set_name(old_domain_filename+filename_ext)
497
498
499            # Change the domain starttime to the current time
500            old_domain_starttime = self.domain.starttime
501            self.domain.starttime = self.domain.time
502
503            # Build a new data_structure.
504            next_data_structure=\
505                Data_format_sww(self.domain, mode=self.mode,\
506                                max_size = self.max_size,\
507                                recursion = self.recursion+1)
508            if not self.recursion:
509                print '    file_size = %s'%file_size
510                print '    saving file to %s'%next_data_structure.filename
511            #set up the new data_structure
512            self.domain.writer = next_data_structure
513
514            #FIXME - could be cleaner to use domain.store_timestep etc.
515            next_data_structure.store_connectivity()
516            next_data_structure.store_timestep(names)
517            fid.sync()
518            fid.close()
519
520            #restore the old starttime and filename
521            self.domain.starttime = old_domain_starttime
522            self.domain.set_name(old_domain_filename)           
523        else:
524            self.recursion = False
525            domain = self.domain
526
527            # Get the variables
528            time = fid.variables['time']
529            stage = fid.variables['stage']
530            xmomentum = fid.variables['xmomentum']
531            ymomentum = fid.variables['ymomentum']
532            i = len(time)
533            if type(names) not in [types.ListType, types.TupleType]:
534                names = [names]
535
536            if 'stage' in names and 'xmomentum' in names and \
537               'ymomentum' in names:
538
539                # Get stage, elevation, depth and select only those
540                # values where minimum_storable_height is exceeded
541                Q = domain.quantities['stage']
542                A, _ = Q.get_vertex_values(xy = False,
543                                           precision = self.precision)
544                z = fid.variables['elevation']
545
546                storable_indices = A-z[:] >= self.minimum_storable_height
547                stage = choose(storable_indices, (z[:], A))
548               
549                # Define a zero vector of same size and type as A
550                # for use with momenta
551                null = zeros(size(A), A.typecode())
552               
553                # Get xmomentum where depth exceeds minimum_storable_height
554                Q = domain.quantities['xmomentum']
555                xmom, _ = Q.get_vertex_values(xy = False,
556                                              precision = self.precision)
557                xmomentum = choose(storable_indices, (null, xmom))
558               
559
560                # Get ymomentum where depth exceeds minimum_storable_height
561                Q = domain.quantities['ymomentum']
562                ymom, _ = Q.get_vertex_values(xy = False,
563                                              precision = self.precision)
564                ymomentum = choose(storable_indices, (null, ymom))               
565               
566                # Write quantities to NetCDF
567                self.writer.store_quantities(fid, 
568                                             time=self.domain.time,
569                                             sww_precision=self.precision,
570                                             stage=stage,
571                                             xmomentum=xmomentum,
572                                             ymomentum=ymomentum)
573            else:
574                msg = 'Quantities stored must be: stage, xmomentum, ymomentum.'
575                msg += ' Instead I got: ' + str(names)
576                raise Exception, msg
577           
578
579
580            # Update extrema if requested
581            domain = self.domain
582            if domain.quantities_to_be_monitored is not None:
583                for q, info in domain.quantities_to_be_monitored.items():
584
585                    if info['min'] is not None:
586                        fid.variables[q + '.extrema'][0] = info['min']
587                        fid.variables[q + '.min_location'][:] =\
588                                        info['min_location']
589                        fid.variables[q + '.min_time'][0] = info['min_time']
590                       
591                    if info['max'] is not None:
592                        fid.variables[q + '.extrema'][1] = info['max']
593                        fid.variables[q + '.max_location'][:] =\
594                                        info['max_location']
595                        fid.variables[q + '.max_time'][0] = info['max_time']
596
597           
598
599            # Flush and close
600            fid.sync()
601            fid.close()
602
603
604
605# Class for handling checkpoints data
606class Data_format_cpt(Data_format):
607    """Interface to native NetCDF format (.cpt)
608    """
609
610
611    def __init__(self, domain, mode = 'w'):
612        from Scientific.IO.NetCDF import NetCDFFile
613        from Numeric import Int, Float
614
615        self.precision = Float #Use full precision
616
617        Data_format.__init__(self, domain, 'cpt', mode)
618
619
620        # NetCDF file definition
621        fid = NetCDFFile(self.filename, mode)
622
623        if mode == 'w':
624            #Create new file
625            fid.institution = 'Geoscience Australia'
626            fid.description = 'Checkpoint data'
627            #fid.smooth = domain.smooth
628            fid.order = domain.default_order
629
630            # dimension definitions
631            fid.createDimension('number_of_volumes', self.number_of_volumes)
632            fid.createDimension('number_of_vertices', 3)
633
634            #Store info at all vertices (no smoothing)
635            fid.createDimension('number_of_points', 3*self.number_of_volumes)
636            fid.createDimension('number_of_timesteps', None) #extensible
637
638            # variable definitions
639
640            #Mesh
641            fid.createVariable('x', self.precision, ('number_of_points',))
642            fid.createVariable('y', self.precision, ('number_of_points',))
643
644
645            fid.createVariable('volumes', Int, ('number_of_volumes',
646                                                'number_of_vertices'))
647
648            fid.createVariable('time', self.precision,
649                               ('number_of_timesteps',))
650
651            #Allocate space for all quantities
652            for name in domain.quantities.keys():
653                fid.createVariable(name, self.precision,
654                                   ('number_of_timesteps',
655                                    'number_of_points'))
656
657        #Close
658        fid.close()
659
660
661    def store_checkpoint(self):
662        """
663        Write x,y coordinates of triangles.
664        Write connectivity (
665        constituting
666        the bed elevation.
667        """
668
669        from Scientific.IO.NetCDF import NetCDFFile
670
671        from Numeric import concatenate
672
673        domain = self.domain
674
675        #Get NetCDF
676        fid = NetCDFFile(self.filename, 'a')  #Open existing file for append
677
678        # Get the variables
679        x = fid.variables['x']
680        y = fid.variables['y']
681        z = fid.variables['elevation']  # FIXME: quantity vars are (time, points)
682        volumes = fid.variables['volumes']
683
684        # Get X, Y and bed elevation Z
685        Q = domain.quantities['elevation']
686        X,Y,Z,V = Q.get_vertex_values(xy=True,
687                      precision = self.precision)
688
689
690
691        x[:] = X.astype(self.precision)
692        y[:] = Y.astype(self.precision)
693        z[:] = Z.astype(self.precision)
694
695        volumes[:] = V
696
697        #Close
698        fid.close()
699
700
701    def store_timestep(self, name):
702        """Store time and named quantity to file
703        """
704        from Scientific.IO.NetCDF import NetCDFFile
705        from time import sleep
706
707        #Get NetCDF
708        retries = 0
709        file_open = False
710        while not file_open and retries < 10:
711            try:
712                fid = NetCDFFile(self.filename, 'a') #Open existing file
713            except IOError:
714                #This could happen if someone was reading the file.
715                #In that case, wait a while and try again
716                msg = 'Warning (store_timestep): File %s could not be opened'\
717                  %self.filename
718                msg += ' - trying again'
719                print msg
720                retries += 1
721                sleep(1)
722            else:
723                file_open = True
724
725        if not file_open:
726            msg = 'File %s could not be opened for append' %self.filename
727            raise DataFileNotOpenError, msg
728
729
730        domain = self.domain
731
732        # Get the variables
733        time = fid.variables['time']
734        stage = fid.variables['stage']
735        i = len(time)
736
737        #Store stage
738        time[i] = self.domain.time
739
740        # Get quantity
741        Q = domain.quantities[name]
742        A,V = Q.get_vertex_values(xy=False,
743                                  precision = self.precision)
744
745        stage[i,:] = A.astype(self.precision)
746
747        #Flush and close
748        fid.sync()
749        fid.close()
750
751
752#### NED is national exposure database (name changed to NEXIS)
753   
754LAT_TITLE = 'LATITUDE'
755LONG_TITLE = 'LONGITUDE'
756X_TITLE = 'x'
757Y_TITLE = 'y'
758class Exposure_csv:
759    def __init__(self,file_name, latitude_title=LAT_TITLE,
760                 longitude_title=LONG_TITLE, is_x_y_locations=None,
761                 x_title=X_TITLE, y_title=Y_TITLE,
762                 refine_polygon=None, title_check_list=None):
763        """
764        This class is for handling the exposure csv file.
765        It reads the file in and converts the lats and longs to a geospatial
766        data object.
767        Use the methods to read and write columns.
768
769        The format of the csv files it reads is:
770           The first row is a title row.
771           Commas are the delimiters.
772           Each column is a 'set' of data.
773
774        Feel free to use/expand it to read other csv files.
775
776
777        It is not for adding and deleting rows.
778
779        Can geospatial handle string attributes? It's not made for them.
780        Currently it can't load and save string attributes.
781
782        So why not just use geospatial to hold the x, y and georef? Bad, since
783        different attributes are in different structures.  Not so bad: the
784        info needed to write the .csv file when it is saved is in
785        attribute_dic.
786
787        The location info is in the geospatial attribute.
788
789        """
790        self._file_name = file_name
791        self._geospatial = None #
792
793        # self._attribute_dic is a dictionary.
794        #The keys are the column titles.
795        #The values are lists of column data
796       
797        # self._title_index_dic is a dictionary.
798        #The keys are the column titles.
799        #The values are the index positions of file columns.
800        self._attribute_dic, self._title_index_dic = \
801            csv2dict(self._file_name, title_check_list=title_check_list)
802        try:
803            #Have code here that handles caps or lower
804            lats = self._attribute_dic[latitude_title]
805            longs = self._attribute_dic[longitude_title]
806           
807        except KeyError:
808            # maybe a warning..
809            #Let's see if this works..
810            if False != is_x_y_locations:
811                is_x_y_locations = True
812            pass
813        else:
814            self._geospatial = Geospatial_data(latitudes = lats,
815                 longitudes = longs)
816
817        if is_x_y_locations is True:
818            if self._geospatial is not None:
819                pass #fixme throw an error
820            try:
821                xs = self._attribute_dic[x_title]
822                ys = self._attribute_dic[y_title]
823                points = [[float(i),float(j)] for i,j in map(None,xs,ys)]
824            except KeyError:
825                # maybe a warning..
826                msg = "Could not find location information."
827                raise TitleValueError, msg
828            else:
829                self._geospatial = Geospatial_data(data_points=points)
830           
831        # create a list of points that are in the refining_polygon
832        # described by a list of indexes representing the points
833
834    def __cmp__(self, other):
835        #print "self._attribute_dic",self._attribute_dic
836        #print "other._attribute_dic",other._attribute_dic
837        #print "self._title_index_dic", self._title_index_dic
838        #print "other._title_index_dic", other._title_index_dic
839       
840        #check that a is an instance of this class
841        if isinstance(self, type(other)):
842            result = cmp(self._attribute_dic, other._attribute_dic)
843            if result <>0:
844                return result
845            # The order of the columns is important. Therefore..
846            result = cmp(self._title_index_dic, other._title_index_dic)
847            if result <>0:
848                return result
849            for self_ls, other_ls in map(None,self._attribute_dic, \
850                    other._attribute_dic):
851                result = cmp(self._attribute_dic[self_ls],
852                             other._attribute_dic[other_ls])
853                if result <>0:
854                    return result
855            return 0
856        else:
857            return 1
858   
859
860    def get_column(self, column_name, use_refind_polygon=False):
861        """
862        Given a column name return a list of the column values
863
864        Note, the type of the values will be String!
865        do this to change a list of strings to a list of floats
866        time = [float(x) for x in time]
867       
868        Not implemented:
869        if use_refind_polygon is True, only return values in the
870        refined polygon
871        """
872        if not self._attribute_dic.has_key(column_name):
873            msg = 'There is no column called %s!' %column_name
874            raise TitleValueError, msg
875        return self._attribute_dic[column_name]
876
877
878    def get_value(self, value_column_name,
879                  known_column_name,
880                  known_values,
881                  use_refind_polygon=False):
882        """
883        Do linear interpolation on the known_column_name column, using the
884        known_values, to return a value of the value_column_name column.
885        """
886        pass
887
888
889    def get_location(self, use_refind_polygon=False):
890        """
891        Return a geospatial object which describes the
892        locations of the location file.
893
894        Note, if there is no location info, this returns None.
895       
896        Not implemented:
897        if use_refind_polygon is True, only return values in the
898        refined polygon
899        """
900        return self._geospatial
901
902    def set_column(self, column_name, column_values, overwrite=False):
903        """
904        Add a column to the 'end' (with the right most column being the end)
905        of the csv file.
906
907        Set overwrite to True if you want to overwrite a column.
908       
909        Note, in column_name white space is removed and case is not checked.
910        Precondition:
911        The column_name and column_values cannot contain commas.
912        """
913       
914        value_row_count = \
915            len(self._attribute_dic[self._title_index_dic.keys()[0]])
916        if len(column_values) <> value_row_count: 
917            msg = 'The number of column values must equal the number of rows.'
918            raise DataMissingValuesError, msg
919       
920        if self._attribute_dic.has_key(column_name):
921            if not overwrite:
922                msg = 'Column name %s already in use!' %column_name
923                raise TitleValueError, msg
924        else:
925            # New title.  Add it to the title index.
926            self._title_index_dic[column_name] = len(self._title_index_dic)
927        self._attribute_dic[column_name] = column_values
928        #print "self._title_index_dic[column_name]",self._title_index_dic
929
930    def save(self, file_name=None):
931        """
932        Save the exposure csv file
933        """
934        if file_name is None:
935            file_name = self._file_name
936       
937        fd = open(file_name,'wb')
938        writer = csv.writer(fd)
939       
940        # Write the titles to the csv file
941        line = [None]* len(self._title_index_dic)
942        for title in self._title_index_dic.iterkeys():
943            line[self._title_index_dic[title]]= title
944        writer.writerow(line)
945       
946        # Write the values to the csv file
947        value_row_count = \
948            len(self._attribute_dic[self._title_index_dic.keys()[0]])
949        for row_i in range(value_row_count):
950            line = [None]* len(self._title_index_dic)
951            for title in self._title_index_dic.iterkeys():
952                line[self._title_index_dic[title]]= \
953                     self._attribute_dic[title][row_i]
954            writer.writerow(line)
955
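# A usage sketch for Exposure_csv (file name and column titles are
# hypothetical; columns are returned as lists of strings):
#
#     >>> exposure = Exposure_csv('exposure.csv')
#     >>> lats = [float(x) for x in exposure.get_column('LATITUDE')]
#     >>> exposure.set_column('LAT_ROUNDED', ['%.1f' % x for x in lats])
#     >>> exposure.save('exposure_rounded.csv')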
956
957def csv2array(file_name):
958    """Convert CSV files of the form
959   
960    time, discharge, velocity
961    0.0,  1.2,       0.0
962    0.1,  3.2,       1.1
963    ...
964   
965    to a dictionary of numeric arrays.
966   
967   
968    See underlying function csv2dict for more details.
969   
970    """
971   
972   
973    X, _ = csv2dict(file_name)
974   
975    Y = {}
976    for key in X.keys():
977        Y[key] = array([float(x) for x in X[key]])
978       
979    return Y   
980   
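# Usage sketch (the file and its contents are hypothetical):
#
#     >>> open('gauge.csv', 'w').write('time,discharge\n0.0,1.2\n0.1,3.2\n')
#     >>> data = csv2array('gauge.csv')
#     >>> data['discharge']      # Numeric array of floats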
981           
982def csv2dict(file_name, title_check_list=None):
983    """
984    Load in the csv as a dic, title as key and column info as value, .
985    Also, create a dic, title as key and column index as value,
986    to keep track of the column order.
987
988    Two dictionaries are returned.
989   
990    WARNING: Values are returned as strings.
991    do this to change a list of strings to a list of floats
992        time = [float(x) for x in time]
993
994       
995    """
996   
997    #
998    attribute_dic = {}
999    title_index_dic = {}
1000    titles_stripped = [] # list of titles
1001    reader = csv.reader(file(file_name))
1002
1003    # Read in and manipulate the title info
1004    titles = reader.next()
1005    for i,title in enumerate(titles):
1006        titles_stripped.append(title.strip())
1007        title_index_dic[title.strip()] = i
1008    title_count = len(titles_stripped)       
1009    #print "title_index_dic",title_index_dic
1010    if title_check_list is not None:
1011        for title_check in title_check_list:
1012            #msg = "Reading error.  This row is not present ", title_check
1013            #assert title_index_dic.has_key(title_check), msg
1014            if not title_index_dic.has_key(title_check):
1015                #reader.close()
1016                msg = "Reading error.  This row is not present ", \
1017                      title_check                     
1018                raise IOError, msg
1019               
1020   
1021   
1022    #create a dic of colum values, indexed by column title
1023    for line in reader:
1024        if len(line) <> title_count:
1025            raise IOError #FIXME make this nicer
1026        for i, value in enumerate(line):
1027            attribute_dic.setdefault(titles_stripped[i],[]).append(value)
1028       
1029    return attribute_dic, title_index_dic
1030
1031
1032#Auxiliary
1033def write_obj(filename,x,y,z):
1034    """Store x,y,z vectors into filename (obj format)
1035       Vectors are assumed to have dimension (M,3) where
1036       M corresponds to the number elements.
1037       triangles are assumed to be disconnected
1038
1039       The three numbers in each vector correspond to three vertices,
1040
1041       e.g. the x coordinate of vertex 1 of element i is in x[i,1]
1042
1043    """
1044    #print 'Writing obj to %s' % filename
1045
1046    import os.path
1047
1048    root, ext = os.path.splitext(filename)
1049    if ext == '.obj':
1050        FN = filename
1051    else:
1052        FN = filename + '.obj'
1053
1054
1055    outfile = open(FN, 'wb')
1056    outfile.write("# Triangulation as an obj file\n")
1057
1058    M, N = x.shape
1059    assert N==3  #Assuming three vertices per element
1060
1061    for i in range(M):
1062        for j in range(N):
1063            outfile.write("v %f %f %f\n" % (x[i,j],y[i,j],z[i,j]))
1064
1065    for i in range(M):
1066        base = i*N
1067        outfile.write("f %d %d %d\n" % (base+1,base+2,base+3))
1068
1069    outfile.close()
1070
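# A minimal sketch exercising write_obj with one flat triangle
# (hypothetical coordinates; not part of the original module):
def _example_write_obj():
    from Numeric import array, Float
    x = array([[0.0, 1.0, 0.0]], Float)    # one element, three vertices
    y = array([[0.0, 0.0, 1.0]], Float)
    z = array([[0.0, 0.0, 0.0]], Float)
    write_obj('single_triangle', x, y, z)  # writes single_triangle.obj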
1071
1072#########################################################
1073#Conversion routines
1074########################################################
1075
1076def sww2obj(basefilename, size):
1077    """Convert netcdf based data output to obj
1078    """
1079    from Scientific.IO.NetCDF import NetCDFFile
1080
1081    from Numeric import Float, zeros
1082
1083    #Get NetCDF
1084    FN = create_filename('.', basefilename, 'sww', size)
1085    print 'Reading from ', FN
1086    fid = NetCDFFile(FN, 'r')  #Open existing file for read
1087
1088
1089    # Get the variables
1090    x = fid.variables['x']
1091    y = fid.variables['y']
1092    z = fid.variables['elevation']
1093    time = fid.variables['time']
1094    stage = fid.variables['stage']
1095
1096    M = size  #Number of lines
1097    xx = zeros((M,3), Float)
1098    yy = zeros((M,3), Float)
1099    zz = zeros((M,3), Float)
1100
1101    for i in range(M):
1102        for j in range(3):
1103            xx[i,j] = x[i+j*M]
1104            yy[i,j] = y[i+j*M]
1105            zz[i,j] = z[i+j*M]
1106
1107    #Write obj for bathymetry
1108    FN = create_filename('.', basefilename, 'obj', size)
1109    write_obj(FN,xx,yy,zz)
1110
1111
1112    #Now read all the data with variable information, combine with
1113    #x,y info and store as obj
1114
1115    for k in range(len(time)):
1116        t = time[k]
1117        print 'Processing timestep %f' %t
1118
1119        for i in range(M):
1120            for j in range(3):
1121                zz[i,j] = stage[k,i+j*M]
1122
1123
1124        #Write obj for variable data
1125        #FN = create_filename(basefilename, 'obj', size, time=t)
1126        FN = create_filename('.', basefilename[:5], 'obj', size, time=t)
1127        write_obj(FN,xx,yy,zz)
1128
1129
1130def dat2obj(basefilename):
1131    """Convert line based data output to obj
1132    FIXME: Obsolete?
1133    """
1134
1135    import glob, os
1136    from anuga.config import data_dir
1137
1138
1139    #Get bathymetry and x,y's
1140    lines = open(data_dir+os.sep+basefilename+'_geometry.dat', 'r').readlines()
1141
1142    from Numeric import zeros, Float
1143
1144    M = len(lines)  #Number of lines
1145    x = zeros((M,3), Float)
1146    y = zeros((M,3), Float)
1147    z = zeros((M,3), Float)
1148
1149    ##i = 0
1150    for i, line in enumerate(lines):
1151        tokens = line.split()
1152        values = map(float,tokens)
1153
1154        for j in range(3):
1155            x[i,j] = values[j*3]
1156            y[i,j] = values[j*3+1]
1157            z[i,j] = values[j*3+2]
1158
1159        ##i += 1
1160
1161
1162    #Write obj for bathymetry
1163    write_obj(data_dir+os.sep+basefilename+'_geometry',x,y,z)
1164
1165
1166    #Now read all the data files with variable information, combine with
1167    #x,y info
1168    #and store as obj
1169
1170    files = glob.glob(data_dir+os.sep+basefilename+'*.dat')
1171
1172    for filename in files:
1173        print 'Processing %s' % filename
1174
1175        lines = open(data_dir+os.sep+filename,'r').readlines()
1176        assert len(lines) == M
1177        root, ext = os.path.splitext(filename)
1178
1179        #Get time from filename
1180        i0 = filename.find('_time=')
1181        if i0 == -1:
1182            #Skip bathymetry file
1183            continue
1184
1185        i0 += 6  #Position where time starts
1186        i1 = filename.find('.dat')
1187
1188        if i1 > i0:
1189            t = float(filename[i0:i1])
1190        else:
1191            raise DataTimeError, 'Hmmmm'
1192
1193
1194
1195        ##i = 0
1196        for i, line in enumerate(lines):
1197            tokens = line.split()
1198            values = map(float,tokens)
1199
1200            for j in range(3):
1201                z[i,j] = values[j]
1202
1203            ##i += 1
1204
1205        #Write obj for variable data
1206        write_obj(data_dir+os.sep+basefilename+'_time=%.4f' %t,x,y,z)
1207
1208
1209def filter_netcdf(filename1, filename2, first=0, last=None, step = 1):
1210    """Read netcdf filename1, pick timesteps first:step:last and save to
1211    netcdf file filename2
1212    """
1213    from Scientific.IO.NetCDF import NetCDFFile
1214
1215    #Get NetCDF
1216    infile = NetCDFFile(filename1, 'r')  #Open existing file for read
1217    outfile = NetCDFFile(filename2, 'w')  #Open new file
1218
1219
1220    #Copy dimensions
1221    for d in infile.dimensions:
1222        outfile.createDimension(d, infile.dimensions[d])
1223
1224    for name in infile.variables:
1225        var = infile.variables[name]
1226        outfile.createVariable(name, var.typecode(), var.dimensions)
1227
1228
1229    #Copy the static variables
1230    for name in infile.variables:
1231        if name == 'time' or name == 'stage':
1232            pass
1233        else:
1234            #Copy
1235            outfile.variables[name][:] = infile.variables[name][:]
1236
1237    #Copy selected timesteps
1238    time = infile.variables['time']
1239    stage = infile.variables['stage']
1240
1241    newtime = outfile.variables['time']
1242    newstage = outfile.variables['stage']
1243
1244    if last is None:
1245        last = len(time)
1246
1247    selection = range(first, last, step)
1248    for i, j in enumerate(selection):
1249        print 'Copying timestep %d of %d (%f)' %(j, last-first, time[j])
1250        newtime[i] = time[j]
1251        newstage[i,:] = stage[j,:]
1252
1253    #Close
1254    infile.close()
1255    outfile.close()
1256
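# Usage sketch (file names are hypothetical): keep every second timestep
# of the first hundred.
#
#     >>> filter_netcdf('full_run.sww', 'decimated_run.sww',
#     ...               first=0, last=100, step=2)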
1257
1258#Get data objects
1259def get_dataobject(domain, mode='w'):
1260    """Return instance of class of given format using filename
1261    """
1262
1263    cls = eval('Data_format_%s' %domain.format)
1264    return cls(domain, mode)
1265
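# A sketch of an eval-free equivalent, assuming the Data_format_* classes
# defined in this module (a globals() lookup replaces eval; behaviour is
# otherwise the same):
def _example_get_dataobject(domain, mode='w'):
    cls = globals()['Data_format_%s' % domain.format]
    return cls(domain, mode)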
1266
1267
1268
1269def dem2pts(basename_in, basename_out=None,
1270            easting_min=None, easting_max=None,
1271            northing_min=None, northing_max=None,
1272            use_cache=False, verbose=False,):
1273    """Read Digitial Elevation model from the following NetCDF format (.dem)
1274
1275    Example:
1276
1277    ncols         3121
1278    nrows         1800
1279    xllcorner     722000
1280    yllcorner     5893000
1281    cellsize      25
1282    NODATA_value  -9999
1283    138.3698 137.4194 136.5062 135.5558 ..........
1284
1285    Convert to NetCDF pts format which is
1286
1287    points:  (Nx2) Float array
1288    elevation: N Float array
1289    """
1290
1291
1292
1293    kwargs = {'basename_out': basename_out,
1294              'easting_min': easting_min,
1295              'easting_max': easting_max,
1296              'northing_min': northing_min,
1297              'northing_max': northing_max,
1298              'verbose': verbose}
1299
1300    if use_cache is True:
1301        from caching import cache
1302        result = cache(_dem2pts, basename_in, kwargs,
1303                       dependencies = [basename_in + '.dem'],
1304                       verbose = verbose)
1305
1306    else:
1307        result = apply(_dem2pts, [basename_in], kwargs)
1308
1309    return result
1310
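# Usage sketch (basename and bounding box are hypothetical):
#
#     >>> dem2pts('perth_dem',                    # reads perth_dem.dem
#     ...         easting_min=380000, easting_max=400000,
#     ...         northing_min=6430000, northing_max=6450000,
#     ...         verbose=True)                   # writes perth_dem.pts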
1311
1312def _dem2pts(basename_in, basename_out=None, verbose=False,
1313            easting_min=None, easting_max=None,
1314            northing_min=None, northing_max=None):
1315    """Read Digitial Elevation model from the following NetCDF format (.dem)
1316
1317    Internal function. See public function dem2pts for details.
1318    """
1319
1320    # FIXME: Can this be written feasibly using write_pts?
1321
1322    import os
1323    from Scientific.IO.NetCDF import NetCDFFile
1324    from Numeric import Float, zeros, reshape, sum
1325
1326    root = basename_in
1327
1328    # Get NetCDF
1329    infile = NetCDFFile(root + '.dem', 'r')  # Open existing netcdf file for read
1330
1331    if verbose: print 'Reading DEM from %s' %(root + '.dem')
1332
1333    ncols = infile.ncols[0]
1334    nrows = infile.nrows[0]
1335    xllcorner = infile.xllcorner[0]  # Easting of lower left corner
1336    yllcorner = infile.yllcorner[0]  # Northing of lower left corner
1337    cellsize = infile.cellsize[0]
1338    NODATA_value = infile.NODATA_value[0]
1339    dem_elevation = infile.variables['elevation']
1340
1341    zone = infile.zone[0]
1342    false_easting = infile.false_easting[0]
1343    false_northing = infile.false_northing[0]
1344
1345    # Text strings
1346    projection = infile.projection
1347    datum = infile.datum
1348    units = infile.units
1349
1350
1351    # Get output file
1352    if basename_out is None:
1353        ptsname = root + '.pts'
1354    else:
1355        ptsname = basename_out + '.pts'
1356
1357    if verbose: print 'Store to NetCDF file %s' %ptsname
1358    # NetCDF file definition
1359    outfile = NetCDFFile(ptsname, 'w')
1360
1361    # Create new file
1362    outfile.institution = 'Geoscience Australia'
1363    outfile.description = 'NetCDF pts format for compact and portable storage ' +\
1364                      'of spatial point data'
1365    # Assign default values
1366    if easting_min is None: easting_min = xllcorner
1367    if easting_max is None: easting_max = xllcorner + ncols*cellsize
1368    if northing_min is None: northing_min = yllcorner
1369    if northing_max is None: northing_max = yllcorner + nrows*cellsize
1370
1371    # Compute offsets to update georeferencing
1372    easting_offset = xllcorner - easting_min
1373    northing_offset = yllcorner - northing_min
1374
1375    # Georeferencing
1376    outfile.zone = zone
1377    outfile.xllcorner = easting_min # Easting of lower left corner
1378    outfile.yllcorner = northing_min # Northing of lower left corner
1379    outfile.false_easting = false_easting
1380    outfile.false_northing = false_northing
1381
1382    outfile.projection = projection
1383    outfile.datum = datum
1384    outfile.units = units
1385
1386
1387    # Grid info (FIXME: probably not going to be used, but heck)
1388    outfile.ncols = ncols
1389    outfile.nrows = nrows
1390
1391    dem_elevation_r = reshape(dem_elevation, (nrows, ncols))
1392    totalnopoints = nrows*ncols
1393
1394    # Calculating number of NODATA_values for each row in clipped region
1395    # FIXME: use array operations to do faster
1396    nn = 0
1397    k = 0
1398    i1_0 = 0
1399    j1_0 = 0
1400    thisj = 0
1401    thisi = 0
1402    for i in range(nrows):
1403        y = (nrows-i-1)*cellsize + yllcorner
1404        for j in range(ncols):
1405            x = j*cellsize + xllcorner
1406            if easting_min <= x <= easting_max and \
1407               northing_min <= y <= northing_max:
1408                thisj = j
1409                thisi = i
1410                if dem_elevation_r[i,j] == NODATA_value: nn += 1
1411
1412                if k == 0:
1413                    i1_0 = i
1414                    j1_0 = j
1415                k += 1
1416
1417    index1 = j1_0
1418    index2 = thisj
1419
1420    # Dimension definitions
1421    nrows_in_bounding_box = int(round((northing_max-northing_min)/cellsize))
1422    ncols_in_bounding_box = int(round((easting_max-easting_min)/cellsize))
1423
1424    clippednopoints = (thisi+1-i1_0)*(thisj+1-j1_0)
1425    nopoints = clippednopoints-nn
1426
1427    clipped_dem_elev = dem_elevation_r[i1_0:thisi+1,j1_0:thisj+1]
1428
1429    if verbose:
1430        print 'There are %d values in the elevation' %totalnopoints
1431        print 'There are %d values in the clipped elevation' %clippednopoints
1432        print 'There are %d NODATA_values in the clipped elevation' %nn
1433
1434    outfile.createDimension('number_of_points', nopoints)
1435    outfile.createDimension('number_of_dimensions', 2) #This is 2d data
1436
1437    # Variable definitions
1438    outfile.createVariable('points', Float, ('number_of_points',
1439                                             'number_of_dimensions'))
1440    outfile.createVariable('elevation', Float, ('number_of_points',))
1441
1442    # Get handles to the variables
1443    points = outfile.variables['points']
1444    elevation = outfile.variables['elevation']
1445
1446    lenv = index2-index1+1
1447    # Store data
1448    global_index = 0
1449    # for i in range(nrows):
1450    for i in range(i1_0,thisi+1,1):
1451        if verbose and i%((nrows+10)/10)==0:
1452            print 'Processing row %d of %d' %(i, nrows)
1453
1454        lower_index = global_index
1455
1456        v = dem_elevation_r[i,index1:index2+1]
1457        no_NODATA = sum(v == NODATA_value)
1458        if no_NODATA > 0:
1459            newcols = lenv - no_NODATA # ncols_in_bounding_box - no_NODATA
1460        else:
1461            newcols = lenv # ncols_in_bounding_box
1462
1463        telev = zeros(newcols, Float)
1464        tpoints = zeros((newcols, 2), Float)
1465
1466        local_index = 0
1467
1468        y = (nrows-i-1)*cellsize + yllcorner
1469        #for j in range(ncols):
1470        for j in range(j1_0,index2+1,1):
1471
1472            x = j*cellsize + xllcorner
1473            if easting_min <= x <= easting_max and \
1474               northing_min <= y <= northing_max and \
1475               dem_elevation_r[i,j] <> NODATA_value:
1476                tpoints[local_index, :] = [x-easting_min,y-northing_min]
1477                telev[local_index] = dem_elevation_r[i, j]
1478                global_index += 1
1479                local_index += 1
1480
1481        upper_index = global_index
1482
1483        if upper_index == lower_index + newcols:
1484            points[lower_index:upper_index, :] = tpoints
1485            elevation[lower_index:upper_index] = telev
1486
1487    assert global_index == nopoints, 'index not equal to number of points'
1488
1489    infile.close()
1490    outfile.close()
1491
1492
1493
1494def _read_hecras_cross_sections(lines):
1495    """Return block of surface lines for each cross section
1496    Starts with SURFACE LINE,
1497    Ends with END CROSS-SECTION
1498    """
1499
1500    points = []
1501
1502    reading_surface = False
1503    for i, line in enumerate(lines):
1504
1505        if len(line.strip()) == 0:    #Ignore blanks
1506            continue
1507
1508        if lines[i].strip().startswith('SURFACE LINE'):
1509            reading_surface = True
1510            continue
1511
1512        if lines[i].strip().startswith('END') and reading_surface:
1513            yield points
1514            reading_surface = False
1515            points = []
1516
1517        if reading_surface:
1518            fields = line.strip().split(',')
1519            easting = float(fields[0])
1520            northing = float(fields[1])
1521            elevation = float(fields[2])
1522            points.append([easting, northing, elevation])
1523
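# Usage sketch for the generator above (file name is hypothetical):
#
#     >>> lines = open('perth.sdf').readlines()
#     >>> for section in _read_hecras_cross_sections(lines):
#     ...     print len(section), 'points in this cross-section'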
1524
1525
1526
1527def hecras_cross_sections2pts(basename_in,
1528                              basename_out=None,
1529                              verbose=False):
1530    """Read HEC-RAS Elevation datal from the following ASCII format (.sdf)
1531
1532    Example:
1533
1534
1535# RAS export file created on Mon 15Aug2005 11:42
1536# by HEC-RAS Version 3.1.1
1537
1538BEGIN HEADER:
1539  UNITS: METRIC
1540  DTM TYPE: TIN
1541  DTM: v:\1\cit\perth_topo\river_tin
1542  STREAM LAYER: c:\local\hecras\21_02_03\up_canning_cent3d.shp
1543  CROSS-SECTION LAYER: c:\local\hecras\21_02_03\up_can_xs3d.shp
1544  MAP PROJECTION: UTM
1545  PROJECTION ZONE: 50
1546  DATUM: AGD66
1547  VERTICAL DATUM:
1548  NUMBER OF REACHES:  19
1549  NUMBER OF CROSS-SECTIONS:  14206
1550END HEADER:
1551
1552
1553Only the SURFACE LINE data of the following form will be utilised
1554
1555  CROSS-SECTION:
1556    STREAM ID:Southern-Wungong
1557    REACH ID:Southern-Wungong
1558    STATION:19040.*
1559    CUT LINE:
1560      405548.671603161 , 6438142.7594925
1561      405734.536092045 , 6438326.10404912
1562      405745.130459356 , 6438331.48627354
1563      405813.89633823 , 6438368.6272789
1564    SURFACE LINE:
1565     405548.67,   6438142.76,   35.37
1566     405552.24,   6438146.28,   35.41
1567     405554.78,   6438148.78,   35.44
1568     405555.80,   6438149.79,   35.44
1569     405559.37,   6438153.31,   35.45
1570     405560.88,   6438154.81,   35.44
1571     405562.93,   6438156.83,   35.42
1572     405566.50,   6438160.35,   35.38
1573     405566.99,   6438160.83,   35.37
1574     ...
1575   END CROSS-SECTION
1576
1577    Convert to NetCDF pts format which is
1578
1579    points:  (Nx2) Float array
1580    elevation: N Float array
1581    """
1582
1583    import os
1584    from Scientific.IO.NetCDF import NetCDFFile
1585    from Numeric import Float, zeros, reshape
1586
1587    root = basename_in
1588
1589    #Get ASCII file
1590    infile = open(root + '.sdf', 'r')  #Open SDF file for read
1591
1592    if verbose: print 'Reading DEM from %s' %(root + '.sdf')
1593
1594    lines = infile.readlines()
1595    infile.close()
1596
1597    if verbose: print 'Converting to pts format'
1598
1599    i = 0
1600    while lines[i].strip() == '' or lines[i].strip().startswith('#'):
1601        i += 1
1602
1603    assert lines[i].strip().upper() == 'BEGIN HEADER:'
1604    i += 1
1605
1606    assert lines[i].strip().upper().startswith('UNITS:')
1607    units = lines[i].strip().split()[1]
1608    i += 1
1609
1610    assert lines[i].strip().upper().startswith('DTM TYPE:')
1611    i += 1
1612
1613    assert lines[i].strip().upper().startswith('DTM:')
1614    i += 1
1615
1616    assert lines[i].strip().upper().startswith('STREAM')
1617    i += 1
1618
1619    assert lines[i].strip().upper().startswith('CROSS')
1620    i += 1
1621
1622    assert lines[i].strip().upper().startswith('MAP PROJECTION:')
1623    projection = lines[i].strip().split(':')[1]
1624    i += 1
1625
1626    assert lines[i].strip().upper().startswith('PROJECTION ZONE:')
1627    zone = int(lines[i].strip().split(':')[1])
1628    i += 1
1629
1630    assert lines[i].strip().upper().startswith('DATUM:')
1631    datum = lines[i].strip().split(':')[1]
1632    i += 1
1633
1634    assert lines[i].strip().upper().startswith('VERTICAL DATUM:')
1635    i += 1
1636
1637    assert lines[i].strip().upper().startswith('NUMBER OF REACHES:')
1638    i += 1
1639
1640    assert lines[i].strip().upper().startswith('NUMBER OF CROSS-SECTIONS:')
1641    number_of_cross_sections = int(lines[i].strip().split(':')[1])
1642    i += 1
1643
1644
1645    #Now read all points
1646    points = []
1647    elevation = []
1648    for j, entries in enumerate(_read_hecras_cross_sections(lines[i:])):
1649        for k, entry in enumerate(entries):
1650            points.append(entry[:2])
1651            elevation.append(entry[2])
1652
1653
1654    msg = 'Actual number of cross sections == %d, reported as %d'\
1655          %(j+1, number_of_cross_sections)
1656    assert j+1 == number_of_cross_sections, msg
1657
1658    #Get output file
1659    if basename_out == None:
1660        ptsname = root + '.pts'
1661    else:
1662        ptsname = basename_out + '.pts'
1663
1664    geo_ref = Geo_reference(zone, 0, 0, datum, projection, units)
1665    geo = Geospatial_data(points, {"elevation":elevation},
1666                          verbose=verbose, geo_reference=geo_ref)
1667    geo.export_points_file(ptsname)
1668
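# Example (an illustrative sketch, not part of the original module):
# assuming the converter above is named hecras_cross_sections2pts and
# that a HEC-RAS export file riverbed.sdf exists (hypothetical name),
# the conversion to a pts file would look like:
#
#     hecras_cross_sections2pts('riverbed',
#                               basename_out='riverbed_elevation',
#                               verbose=True)
#
# This reads riverbed.sdf, collects the SURFACE LINE coordinates and
# elevations from each cross-section and writes riverbed_elevation.pts.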
1669def export_grid(basename_in, extra_name_out = None,
1670                quantities = None, # defaults to elevation
1671                timestep = None,
1672                reduction = None,
1673                cellsize = 10,
1674                number_of_decimal_places = None,
1675                NODATA_value = -9999,
1676                easting_min = None,
1677                easting_max = None,
1678                northing_min = None,
1679                northing_max = None,
1680                verbose = False,
1681                origin = None,
1682                datum = 'WGS84',
1683                format = 'ers'):
1684    """
1685   
1686    Wrapper for sww2dem. - see sww2dem to find out what most of the
1687    parameters do.
1688
1689    Quantities is a list of quantities.  Each quantity will be
1690    calculated for each sww file.
1691
1692    This returns the basenames of the files returned, which is made up
1693    of the dir and all of the file name, except the extension.
1694
1695    This function returns the names of the files produced.
1696
1697    It will also produce as many output files as there are input sww files.
1698    """
1699   
1700    if quantities is None:
1701        quantities = ['elevation']
1702       
1703    if type(quantities) is str:
1704        quantities = [quantities]
1705
1706    # How many sww files are there?
1707    dir, base = os.path.split(basename_in)
1708    #print "basename_in",basename_in
1709    #print "base",base
1710
1711    iterate_over = get_all_swwfiles(dir,base,verbose)
1712   
1713    if dir == "":
1714        dir = "." # Unix compatibility
1715   
1716    files_out = []
1717    #print 'sww_file',iterate_over
1718    for sww_file in iterate_over:
1719        for quantity in quantities:
1720            if extra_name_out is None:
1721                basename_out = sww_file + '_' + quantity
1722            else:
1723                basename_out = sww_file + '_' + quantity + '_' \
1724                               + extra_name_out
1725#            print "basename_out", basename_out
1726       
1727            file_out = sww2dem(dir+sep+sww_file, dir+sep+basename_out,
1728                               quantity, 
1729                               timestep,
1730                               reduction,
1731                               cellsize,
1732                               number_of_decimal_places,
1733                               NODATA_value,
1734                               easting_min,
1735                               easting_max,
1736                               northing_min,
1737                               northing_max,
1738                               verbose,
1739                               origin,
1740                               datum,
1741                               format)
1742            files_out.append(file_out)
1743    #print "basenames_out after",basenames_out
1744    return files_out
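# Example (an illustrative sketch): export grids for two quantities from
# every sww file whose name starts with 'bedslope' (hypothetical
# basename) in the current directory:
#
#     names = export_grid('bedslope',
#                         quantities=['elevation', 'stage'],
#                         cellsize=5,
#                         reduction=max,
#                         format='asc')
#
# names then lists one basename per quantity per sww file, e.g.
# ['./bedslope_elevation', './bedslope_stage'] for a single input file.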
1745
1746
1747def get_timeseries(production_dirs, output_dir, scenario_name, gauges_dir_name,
1748                   plot_quantity, generate_fig = False,
1749                   reportname = None, surface = False, time_min = None,
1750                   time_max = None, title_on = False, verbose = True,
1751                   nodes=None):
1752    """
1753    nodes - number of processes used.
1754
1755    warning - this function has no tests
1756    """
1757    if reportname == None:
1758        report = False
1759    else:
1760        report = True
1761       
1762    if nodes is None:
1763        is_parallel = False
1764    else:
1765        is_parallel = True
1766       
1767    # Generate figures
1768    swwfiles = {}
1769   
1770    if is_parallel is True:   
1771        for i in range(nodes):
1772            print 'Sending node %d of %d' %(i,nodes)
1773            swwfiles = {}
1774            if reportname is not None:
1775                reportname = reportname + '_%s' %(i)
1776            for label_id in production_dirs.keys():
1777                if label_id == 'boundaries':
1778                    swwfile = best_boundary_sww
1779                else:
1780                    file_loc = output_dir + label_id + sep
1781                    sww_extra = '_P%s_%s' %(i,nodes)
1782                    swwfile = file_loc + scenario_name + sww_extra + '.sww'
1783                    print 'swwfile',swwfile
1784                    swwfiles[swwfile] = label_id
1785
1786            texname, elev_output = sww2timeseries(swwfiles,
1787                                          gauges_dir_name,
1788                                          production_dirs,
1789                                          report = report,
1790                                          reportname = reportname,
1791                                          plot_quantity = plot_quantity,
1792                                          generate_fig = generate_fig,
1793                                          surface = surface,
1794                                          time_min = time_min,
1795                                          time_max = time_max,
1796                                          title_on = title_on,
1797                                          verbose = verbose)
1798    else:   
1799        for label_id in production_dirs.keys():       
1800            if label_id == 'boundaries':
1801                print 'boundaries'
1802                file_loc = project.boundaries_in_dir
1803                swwfile = project.boundaries_dir_name3 + '.sww'
1804                #  swwfile = boundary_dir_filename
1805            else:
1806                file_loc = output_dir + label_id + sep
1807                swwfile = file_loc + scenario_name + '.sww'
1808            swwfiles[swwfile] = label_id
1809       
1810        texname, elev_output = sww2timeseries(swwfiles,
1811                                              gauges_dir_name,
1812                                              production_dirs,
1813                                              report = report,
1814                                              reportname = reportname,
1815                                              plot_quantity = plot_quantity,
1816                                              generate_fig = generate_fig,
1817                                              surface = surface,
1818                                              time_min = time_min,
1819                                              time_max = time_max,
1820                                              title_on = title_on,
1821                                              verbose = verbose)
1822                                         
1823
1824   
1825def sww2dem(basename_in, basename_out = None,
1826            quantity = None, # defaults to elevation
1827            timestep = None,
1828            reduction = None,
1829            cellsize = 10,
1830            number_of_decimal_places = None,
1831            NODATA_value = -9999,
1832            easting_min = None,
1833            easting_max = None,
1834            northing_min = None,
1835            northing_max = None,
1836            verbose = False,
1837            origin = None,
1838            datum = 'WGS84',
1839            format = 'ers'):
1840
1841    """Read SWW file and convert to Digitial Elevation model format
1842    (.asc or .ers)
1843
1844    Example (ASC):
1845
1846    ncols         3121
1847    nrows         1800
1848    xllcorner     722000
1849    yllcorner     5893000
1850    cellsize      25
1851    NODATA_value  -9999
1852    138.3698 137.4194 136.5062 135.5558 ..........
1853
1854    The number of decimal places may be specified in the call to
1855    sww2dem to reduce disk space requirements.
1856   
1857    Also write accompanying file with same basename_out but extension .prj
1858    used to fix the UTM zone, datum, false northings and eastings.
1859
1860    The prj format is assumed to be as
1861
1862    Projection    UTM
1863    Zone          56
1864    Datum         WGS84
1865    Zunits        NO
1866    Units         METERS
1867    Spheroid      WGS84
1868    Xshift        0.0000000000
1869    Yshift        10000000.0000000000
1870    Parameters
1871
1872
1873    The parameter quantity must be the name of an existing quantity or
1874    an expression involving existing quantities. The default is
1875    'elevation'. Quantity is not a list of quantities.
1876
1877    if timestep (an index) is given, output quantity at that timestep
1878
1879    if reduction is given use that to reduce quantity over all timesteps.
1880
1881    datum sets the geodetic datum recorded in the output (default 'WGS84')
1882
1883    format can be either 'asc' or 'ers'
1884    """
1885
1886    import sys
1887    from Numeric import array, Float, concatenate, NewAxis, zeros, reshape, \
1888         sometrue
1889    from Numeric import array2string
1890
1891    from anuga.utilities.polygon import inside_polygon, outside_polygon, \
1892         separate_points_by_polygon
1893    from anuga.abstract_2d_finite_volumes.util import \
1894         apply_expression_to_dictionary
1895
1896    msg = 'Format must be either asc or ers'
1897    assert format.lower() in ['asc', 'ers'], msg
1898
1899
1900    false_easting = 500000
1901    false_northing = 10000000
1902
1903    if quantity is None:
1904        quantity = 'elevation'
1905       
1906    if reduction is None:
1907        reduction = max
1908
1909    if basename_out is None:
1910        basename_out = basename_in + '_%s' %quantity
1911
1912    if quantity_formula.has_key(quantity):
1913        quantity = quantity_formula[quantity]
1914
1915    if number_of_decimal_places is None:
1916        number_of_decimal_places = 3
1917       
1918    swwfile = basename_in + '.sww'
1919    demfile = basename_out + '.' + format
1920    # Note the use of a .ers extension is optional (write_ermapper_grid will
1921    # deal with either option)
1922   
1923    #if verbose: bye= nsuadsfd[0] # uncomment to check catching verbose errors
1924   
1925    # Read sww file
1926    if verbose: 
1927        print 'Reading from %s' %swwfile
1928        print 'Output directory is %s' %basename_out
1929   
1930    from Scientific.IO.NetCDF import NetCDFFile
1931    fid = NetCDFFile(swwfile)
1932
1933    #Get extent and reference
1934    x = fid.variables['x'][:]
1935    y = fid.variables['y'][:]
1936    volumes = fid.variables['volumes'][:]
1937    if timestep is not None:
1938        times = fid.variables['time'][timestep]
1939    else:
1940        times = fid.variables['time'][:]
1941
1942    number_of_timesteps = fid.dimensions['number_of_timesteps']
1943    number_of_points = fid.dimensions['number_of_points']
1944   
1945    if origin is None:
1946
1947        # Get geo_reference
1948        # sww files don't have to have a geo_ref
1949        try:
1950            geo_reference = Geo_reference(NetCDFObject=fid)
1951        except AttributeError, e:
1952            geo_reference = Geo_reference() # Default georef object
1953
1954        xllcorner = geo_reference.get_xllcorner()
1955        yllcorner = geo_reference.get_yllcorner()
1956        zone = geo_reference.get_zone()
1957    else:
1958        zone = origin[0]
1959        xllcorner = origin[1]
1960        yllcorner = origin[2]
1961
1962
1963
1964    # FIXME: Refactor using code from Interpolation_function.statistics
1965    # (in interpolate.py)
1966    # Something like print swwstats(swwname)
1967    if verbose:
1968        print '------------------------------------------------'
1969        print 'Statistics of SWW file:'
1970        print '  Name: %s' %swwfile
1971        print '  Reference:'
1972        print '    Lower left corner: [%f, %f]'\
1973              %(xllcorner, yllcorner)
1974        if timestep is not None:
1975            print '    Time: %f' %(times)
1976        else:
1977            print '    Start time: %f' %fid.starttime[0]
1978        print '  Extent:'
1979        print '    x [m] in [%f, %f], len(x) == %d'\
1980              %(min(x.flat), max(x.flat), len(x.flat))
1981        print '    y [m] in [%f, %f], len(y) == %d'\
1982              %(min(y.flat), max(y.flat), len(y.flat))
1983        if timestep is not None:
1984            print '    t [s] = %f, len(t) == %d' %(times, 1)
1985        else:
1986            print '    t [s] in [%f, %f], len(t) == %d'\
1987              %(min(times), max(times), len(times))
1988        print '  Quantities [SI units]:'
1989        # Comment out for reduced memory consumption
1990        for name in ['stage', 'xmomentum', 'ymomentum']:
1991            q = fid.variables[name][:].flat
1992            if timestep is not None:
1993                q = q[timestep*len(x):(timestep+1)*len(x)]
1994            print '    %s in [%f, %f]' %(name, min(q), max(q))
1995        for name in ['elevation']:
1996            q = fid.variables[name][:].flat
1997            print '    %s in [%f, %f]' %(name, min(q), max(q))
1998           
1999    # Get quantity and reduce if applicable
2000    if verbose: print 'Processing quantity %s' %quantity
2001
2002    # Turn NetCDF objects into Numeric arrays
2003    try:
2004        q = fid.variables[quantity][:] 
2005       
2006       
2007    except:
2008        quantity_dict = {}
2009        for name in fid.variables.keys():
2010            quantity_dict[name] = fid.variables[name][:] 
2011        #Convert quantity expression to quantities found in sww file   
2012        q = apply_expression_to_dictionary(quantity, quantity_dict)
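        # E.g. quantity = 'stage - elevation' is evaluated pointwise over
        # the arrays read above, yielding water depth at each vertex.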
2013    #print "q.shape",q.shape
2014    if len(q.shape) == 2:
2015        #q has a time component and needs to be reduced along
2016        #the temporal dimension
2017        if verbose: print 'Reducing quantity %s' %quantity
2018        q_reduced = zeros( number_of_points, Float )
2019       
2020        if timestep is not None:
2021            for k in range(number_of_points):
2022                q_reduced[k] = q[timestep,k] 
2023        else:
2024            for k in range(number_of_points):
2025                q_reduced[k] = reduction( q[:,k] )
2026
2027        q = q_reduced
2028
2029    #Post condition: Now q has dimension: number_of_points
2030    assert len(q.shape) == 1
2031    assert q.shape[0] == number_of_points
2032
2033    if verbose:
2034        print 'Processed values for %s are in [%f, %f]' %(quantity, min(q), max(q))
2035
2036
2037    #Create grid and update xll/yll corner and x,y
2038
2039    #Relative extent
2040    if easting_min is None:
2041        xmin = min(x)
2042    else:
2043        xmin = easting_min - xllcorner
2044
2045    if easting_max is None:
2046        xmax = max(x)
2047    else:
2048        xmax = easting_max - xllcorner
2049
2050    if northing_min is None:
2051        ymin = min(y)
2052    else:
2053        ymin = northing_min - yllcorner
2054
2055    if northing_max is None:
2056        ymax = max(y)
2057    else:
2058        ymax = northing_max - yllcorner
2059   
2060
2061    msg = 'xmax must be greater than or equal to xmin.\n'
2062    msg += 'I got xmin = %f, xmax = %f' %(xmin, xmax)
2063    assert xmax >= xmin, msg
2064
2065    msg = 'ymax must be greater than or equal to ymin.\n'
2066    msg += 'I got ymin = %f, ymax = %f' %(ymin, ymax)
2067    assert ymax >= ymin, msg   
2068   
2069    if verbose: print 'Creating grid'
2070    ncols = int((xmax-xmin)/cellsize)+1
2071    nrows = int((ymax-ymin)/cellsize)+1
2072
2073
2074    #New absolute reference and coordinates
2075    newxllcorner = xmin+xllcorner
2076    newyllcorner = ymin+yllcorner
2077
2078    x = x+xllcorner-newxllcorner
2079    y = y+yllcorner-newyllcorner
2080   
2081    vertex_points = concatenate ((x[:, NewAxis] ,y[:, NewAxis]), axis = 1)
2082    assert len(vertex_points.shape) == 2
2083
2084    grid_points = zeros ( (ncols*nrows, 2), Float )
2085
2086
2087    for i in xrange(nrows):
2088        if format.lower() == 'asc':
2089            yg = i*cellsize
2090        else:
2091            # This will flip the order of the y values for ers
2092            yg = (nrows-i)*cellsize
2093
2094        for j in xrange(ncols):
2095            xg = j*cellsize
2096            k = i*ncols + j
2097
2098            grid_points[k,0] = xg
2099            grid_points[k,1] = yg
2100
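    # grid_points now holds the relative grid node coordinates in
    # row-major order; for 'asc' row 0 is the southernmost row, while
    # for 'ers' rows run north to south (hence the flipped yg above).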
2101    #Interpolate
2102    from anuga.fit_interpolate.interpolate import Interpolate
2103
2104    # Remove loners from vertex_points, volumes here
2105    vertex_points, volumes = remove_lone_verts(vertex_points, volumes)
2106    #export_mesh_file('monkey.tsh',{'vertices':vertex_points, 'triangles':volumes})
2107    #import sys; sys.exit()
2108    interp = Interpolate(vertex_points, volumes, verbose = verbose)
2109
2110    #Interpolate using quantity values
2111    if verbose: print 'Interpolating'
2112    grid_values = interp.interpolate(q, grid_points).flat
2113
2114
2115    if verbose:
2116        print 'Interpolated values are in [%f, %f]' %(min(grid_values),
2117                                                      max(grid_values))
2118
2119    #Assign NODATA_value to all points outside bounding polygon (from interpolation mesh)
2120    P = interp.mesh.get_boundary_polygon()
2121    outside_indices = outside_polygon(grid_points, P, closed=True)
2122
2123    for i in outside_indices:
2124        grid_values[i] = NODATA_value
2125
2126
2127
2128
2129    if format.lower() == 'ers':
2130        # setup ERS header information
2131        grid_values = reshape(grid_values,(nrows, ncols))
2132        header = {}
2133        header['datum'] = '"' + datum + '"'
2134        # FIXME The use of hardwired UTM and zone number needs to be made optional
2135        # FIXME Also need an automatic test for coordinate type (i.e. EN or LL)
2136        header['projection'] = '"UTM-' + str(zone) + '"'
2137        header['coordinatetype'] = 'EN'
2138        if header['coordinatetype'] == 'LL':
2139            header['longitude'] = str(newxllcorner)
2140            header['latitude'] = str(newyllcorner)
2141        elif header['coordinatetype'] == 'EN':
2142            header['eastings'] = str(newxllcorner)
2143            header['northings'] = str(newyllcorner)
2144        header['nullcellvalue'] = str(NODATA_value)
2145        header['xdimension'] = str(cellsize)
2146        header['ydimension'] = str(cellsize)
2147        header['value'] = '"' + quantity + '"'
2148        #header['celltype'] = 'IEEE8ByteReal'  #FIXME: Breaks unit test
2149
2150
2151        #Write
2152        if verbose: print 'Writing %s' %demfile
2153        import ermapper_grids
2154        ermapper_grids.write_ermapper_grid(demfile, grid_values, header)
2155
2156        fid.close()
2157    else:
2158        #Write to Ascii format
2159
2160        #Write prj file
2161        prjfile = basename_out + '.prj'
2162
2163        if verbose: print 'Writing %s' %prjfile
2164        prjid = open(prjfile, 'w')
2165        prjid.write('Projection    %s\n' %'UTM')
2166        prjid.write('Zone          %d\n' %zone)
2167        prjid.write('Datum         %s\n' %datum)
2168        prjid.write('Zunits        NO\n')
2169        prjid.write('Units         METERS\n')
2170        prjid.write('Spheroid      %s\n' %datum)
2171        prjid.write('Xshift        %d\n' %false_easting)
2172        prjid.write('Yshift        %d\n' %false_northing)
2173        prjid.write('Parameters\n')
2174        prjid.close()
2175
2176
2177
2178        if verbose: print 'Writing %s' %demfile
2179
2180        ascid = open(demfile, 'w')
2181
2182        ascid.write('ncols         %d\n' %ncols)
2183        ascid.write('nrows         %d\n' %nrows)
2184        ascid.write('xllcorner     %d\n' %newxllcorner)
2185        ascid.write('yllcorner     %d\n' %newyllcorner)
2186        ascid.write('cellsize      %f\n' %cellsize)
2187        ascid.write('NODATA_value  %d\n' %NODATA_value)
2188
2189
2190        #Get bounding polygon from mesh
2191        #P = interp.mesh.get_boundary_polygon()
2192        #inside_indices = inside_polygon(grid_points, P)
2193
2194        for i in range(nrows):
2195            if verbose and i%((nrows+10)/10)==0:
2196                print 'Doing row %d of %d' %(i, nrows)
2197
2198            base_index = (nrows-i-1)*ncols
2199
2200            slice = grid_values[base_index:base_index+ncols]
2201            #s = array2string(slice, max_line_width=sys.maxint)
2202            s = array2string(slice, max_line_width=sys.maxint, precision=number_of_decimal_places)
2203            ascid.write(s[1:-1] + '\n')
2204
2205            #print
2206            #for j in range(ncols):
2207            #    index = base_index+j#
2208            #    print grid_values[index],
2209            #    ascid.write('%f ' %grid_values[index])
2210            #ascid.write('\n')
2211
2212        #Close
2213        ascid.close()
2214        fid.close()
2215        return basename_out
2216
2217
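# Example (an illustrative sketch, assuming runup.sww exists): export the
# maximum water depth over all timesteps as an ArcView ASCII grid with
# 2 m cells:
#
#     sww2dem('runup',
#             basename_out='runup_depth_max',
#             quantity='stage - elevation',
#             reduction=max,
#             cellsize=2,
#             format='asc')
#
# This writes runup_depth_max.asc together with the accompanying
# runup_depth_max.prj georeferencing file.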
2218#Backwards compatibility
2219def sww2asc(basename_in, basename_out = None,
2220            quantity = None,
2221            timestep = None,
2222            reduction = None,
2223            cellsize = 10,
2224            verbose = False,
2225            origin = None):
2226    print 'sww2asc will soon be obsoleted - please use sww2dem'
2227    sww2dem(basename_in,
2228            basename_out = basename_out,
2229            quantity = quantity,
2230            timestep = timestep,
2231            reduction = reduction,
2232            cellsize = cellsize,
2233            number_of_decimal_places = None, # Not in the old interface
2234            verbose = verbose,
2235            origin = origin,
2236            datum = 'WGS84',
2237            format = 'asc')
2238
2239def sww2ers(basename_in, basename_out = None,
2240            quantity = None,
2241            timestep = None,
2242            reduction = None,
2243            cellsize = 10,
2244            verbose = False,
2245            origin = None,
2246            datum = 'WGS84'):
2247    print 'sww2ers will soon be obsoleted - please use sww2dem'
2248    sww2dem(basename_in,
2249            basename_out = basename_out,
2250            quantity = quantity,
2251            timestep = timestep,
2252            reduction = reduction,
2253            cellsize = cellsize,
2254            number_of_decimal_places = None, # Not in the old interface
2255            verbose = verbose,
2256            origin = origin,
2257            datum = datum,
2258            format = 'ers')
2259################################# END COMPATIBILITY ##############
2260
2261
2262
2263def sww2pts(basename_in, basename_out=None,
2264            data_points=None,
2265            quantity=None,
2266            timestep=None,
2267            reduction=None,
2268            NODATA_value=-9999,
2269            verbose=False,
2270            origin=None):
2271            #datum = 'WGS84')
2272
2273
2274    """Read SWW file and convert to interpolated values at selected points
2275
2276    The parameter 'quantity' must be the name of an existing quantity or
2277    an expression involving existing quantities. The default is
2278    'elevation'.
2279
2280    if timestep (an index) is given, output quantity at that timestep
2281
2282    if reduction is given use that to reduce quantity over all timesteps.
2283
2284    data_points (Nx2 array) give locations of points where quantity is to be computed.
2285   
2286    """
2287
2288    import sys
2289    from Numeric import array, Float, concatenate, NewAxis, zeros, reshape, sometrue
2290    from Numeric import array2string
2291
2292    from anuga.utilities.polygon import inside_polygon, outside_polygon, separate_points_by_polygon
2293    from anuga.abstract_2d_finite_volumes.util import apply_expression_to_dictionary
2294
2295    from anuga.geospatial_data.geospatial_data import Geospatial_data
2296
2297    if quantity is None:
2298        quantity = 'elevation'
2299
2300    if reduction is None:
2301        reduction = max
2302
2303    if basename_out is None:
2304        basename_out = basename_in + '_%s' %quantity
2305
2306    swwfile = basename_in + '.sww'
2307    ptsfile = basename_out + '.pts'
2308
2309    # Read sww file
2310    if verbose: print 'Reading from %s' %swwfile
2311    from Scientific.IO.NetCDF import NetCDFFile
2312    fid = NetCDFFile(swwfile)
2313
2314    # Get extent and reference
2315    x = fid.variables['x'][:]
2316    y = fid.variables['y'][:]
2317    volumes = fid.variables['volumes'][:]
2318
2319    number_of_timesteps = fid.dimensions['number_of_timesteps']
2320    number_of_points = fid.dimensions['number_of_points']
2321    if origin is None:
2322
2323        # Get geo_reference
2324        # sww files don't have to have a geo_ref
2325        try:
2326            geo_reference = Geo_reference(NetCDFObject=fid)
2327        except AttributeError, e:
2328            geo_reference = Geo_reference() #Default georef object
2329
2330        xllcorner = geo_reference.get_xllcorner()
2331        yllcorner = geo_reference.get_yllcorner()
2332        zone = geo_reference.get_zone()
2333    else:
2334        zone = origin[0]
2335        xllcorner = origin[1]
2336        yllcorner = origin[2]
2337
2338
2339
2340    # FIXME: Refactor using code from file_function.statistics
2341    # Something like print swwstats(swwname)
2342    if verbose:
2343        x = fid.variables['x'][:]
2344        y = fid.variables['y'][:]
2345        times = fid.variables['time'][:]
2346        print '------------------------------------------------'
2347        print 'Statistics of SWW file:'
2348        print '  Name: %s' %swwfile
2349        print '  Reference:'
2350        print '    Lower left corner: [%f, %f]'\
2351              %(xllcorner, yllcorner)
2352        print '    Start time: %f' %fid.starttime[0]
2353        print '  Extent:'
2354        print '    x [m] in [%f, %f], len(x) == %d'\
2355              %(min(x.flat), max(x.flat), len(x.flat))
2356        print '    y [m] in [%f, %f], len(y) == %d'\
2357              %(min(y.flat), max(y.flat), len(y.flat))
2358        print '    t [s] in [%f, %f], len(t) == %d'\
2359              %(min(times), max(times), len(times))
2360        print '  Quantities [SI units]:'
2361        for name in ['stage', 'xmomentum', 'ymomentum', 'elevation']:
2362            q = fid.variables[name][:].flat
2363            print '    %s in [%f, %f]' %(name, min(q), max(q))
2364
2365
2366
2367    # Get quantity and reduce if applicable
2368    if verbose: print 'Processing quantity %s' %quantity
2369
2370    # Turn NetCDF objects into Numeric arrays
2371    quantity_dict = {}
2372    for name in fid.variables.keys():
2373        quantity_dict[name] = fid.variables[name][:]
2374
2375
2376
2377    # Convert quantity expression to quantities found in sww file   
2378    q = apply_expression_to_dictionary(quantity, quantity_dict)
2379
2380
2381
2382    if len(q.shape) == 2:
2383        # q has a time component and needs to be reduced along
2384        # the temporal dimension
2385        if verbose: print 'Reducing quantity %s' %quantity
2386        q_reduced = zeros( number_of_points, Float )
2387
2388        for k in range(number_of_points):
2389            q_reduced[k] = reduction( q[:,k] )
2390
2391        q = q_reduced
2392
2393    # Post condition: Now q has dimension: number_of_points
2394    assert len(q.shape) == 1
2395    assert q.shape[0] == number_of_points
2396
2397
2398    if verbose:
2399        print 'Processed values for %s are in [%f, %f]' %(quantity, min(q), max(q))
2400
2401
2402    # Create grid and update xll/yll corner and x,y
2403    vertex_points = concatenate ((x[:, NewAxis] ,y[:, NewAxis]), axis = 1)
2404    assert len(vertex_points.shape) == 2
2405
2406    # Interpolate
2407    from anuga.fit_interpolate.interpolate import Interpolate
2408    interp = Interpolate(vertex_points, volumes, verbose = verbose)
2409
2410    # Interpolate using quantity values
2411    if verbose: print 'Interpolating'
2412    interpolated_values = interp.interpolate(q, data_points).flat
2413
2414
2415    if verbose:
2416        print 'Interpolated values are in [%f, %f]' %(min(interpolated_values),
2417                                                      max(interpolated_values))
2418
2419    # Assign NODATA_value to all points outside bounding polygon (from interpolation mesh)
2420    P = interp.mesh.get_boundary_polygon()
2421    outside_indices = outside_polygon(data_points, P, closed=True)
2422
2423    for i in outside_indices:
2424        interpolated_values[i] = NODATA_value
2425
2426
2427    # Store results   
2428    G = Geospatial_data(data_points=data_points,
2429                        attributes=interpolated_values)
2430
2431    G.export_points_file(ptsfile, absolute = True)
2432
2433    fid.close()
2434
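# Example (an illustrative sketch, assuming runup.sww exists): interpolate
# the peak stage at two gauge locations (hypothetical coordinates, given
# in the same frame as the sww vertices):
#
#     sww2pts('runup',
#             basename_out='runup_gauges',
#             data_points=[[250.0, 100.0],
#                          [300.0, 150.0]],
#             quantity='stage',
#             reduction=max)
#
# Gauges falling outside the mesh boundary receive NODATA_value.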
2435
2436def convert_dem_from_ascii2netcdf(basename_in, basename_out = None,
2437                                  use_cache = False,
2438                                  verbose = False):
2439    """Read Digitial Elevation model from the following ASCII format (.asc)
2440
2441    Example:
2442
2443    ncols         3121
2444    nrows         1800
2445    xllcorner     722000
2446    yllcorner     5893000
2447    cellsize      25
2448    NODATA_value  -9999
2449    138.3698 137.4194 136.5062 135.5558 ..........
2450
2451    Convert basename_in + '.asc' to NetCDF format (.dem)
2452    mimicking the ASCII format closely.
2453
2454
2455    An accompanying file with same basename_in but extension .prj must exist
2456    and is used to fix the UTM zone, datum, false northings and eastings.
2457
2458    The prj format is assumed to be as
2459
2460    Projection    UTM
2461    Zone          56
2462    Datum         WGS84
2463    Zunits        NO
2464    Units         METERS
2465    Spheroid      WGS84
2466    Xshift        0.0000000000
2467    Yshift        10000000.0000000000
2468    Parameters
2469    """
2470
2471
2472
2473    kwargs = {'basename_out': basename_out, 'verbose': verbose}
2474
2475    if use_cache is True:
2476        from caching import cache
2477        result = cache(_convert_dem_from_ascii2netcdf, basename_in, kwargs,
2478                       dependencies = [basename_in + '.asc',
2479                                       basename_in + '.prj'],
2480                       verbose = verbose)
2481
2482    else:
2483        result = apply(_convert_dem_from_ascii2netcdf, [basename_in], kwargs)
2484
2485    return result
2486
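# Example (an illustrative sketch, assuming topography.asc and
# topography.prj exist):
#
#     convert_dem_from_ascii2netcdf('topography', use_cache=True,
#                                   verbose=True)
#
# This produces topography.dem. With use_cache=True the result is
# memoised by anuga.caching and only recomputed when the .asc or .prj
# file changes.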
2487
2488
2489
2490
2491
2492def _convert_dem_from_ascii2netcdf(basename_in, basename_out = None,
2493                                  verbose = False):
2494    """Read Digitial Elevation model from the following ASCII format (.asc)
2495
2496    Internal function. See public function convert_dem_from_ascii2netcdf for details.
2497    """
2498
2499    import os
2500    from Scientific.IO.NetCDF import NetCDFFile
2501    from Numeric import Float, array
2502
2503    #root, ext = os.path.splitext(basename_in)
2504    root = basename_in
2505
2506    ###########################################
2507    # Read Meta data
2508    if verbose: print 'Reading METADATA from %s' %root + '.prj'
2509    metadatafile = open(root + '.prj')
2510    metalines = metadatafile.readlines()
2511    metadatafile.close()
2512
2513    L = metalines[0].strip().split()
2514    assert L[0].strip().lower() == 'projection'
2515    projection = L[1].strip()                   #TEXT
2516
2517    L = metalines[1].strip().split()
2518    assert L[0].strip().lower() == 'zone'
2519    zone = int(L[1].strip())
2520
2521    L = metalines[2].strip().split()
2522    assert L[0].strip().lower() == 'datum'
2523    datum = L[1].strip()                        #TEXT
2524
2525    L = metalines[3].strip().split()
2526    assert L[0].strip().lower() == 'zunits'     #IGNORE
2527    zunits = L[1].strip()                       #TEXT
2528
2529    L = metalines[4].strip().split()
2530    assert L[0].strip().lower() == 'units'
2531    units = L[1].strip()                        #TEXT
2532
2533    L = metalines[5].strip().split()
2534    assert L[0].strip().lower() == 'spheroid'   #IGNORE
2535    spheroid = L[1].strip()                     #TEXT
2536
2537    L = metalines[6].strip().split()
2538    assert L[0].strip().lower() == 'xshift'
2539    false_easting = float(L[1].strip())
2540
2541    L = metalines[7].strip().split()
2542    assert L[0].strip().lower() == 'yshift'
2543    false_northing = float(L[1].strip())
2544
2545    #print false_easting, false_northing, zone, datum
2546
2547
2548    ###########################################
2549    #Read DEM data
2550
2551    datafile = open(basename_in + '.asc')
2552
2553    if verbose: print 'Reading DEM from %s' %(basename_in + '.asc')
2554    lines = datafile.readlines()
2555    datafile.close()
2556
2557    if verbose: print 'Got', len(lines), ' lines'
2558
2559    ncols = int(lines[0].split()[1].strip())
2560    nrows = int(lines[1].split()[1].strip())
2561
2562    # Do cellsize (line 4) before line 2 and 3
2563    cellsize = float(lines[4].split()[1].strip())   
2564
2565    # Checks suggested by Joaquim Luis
2566    # Our internal representation of xllcorner
2567    # and yllcorner is non-standard.
2568    xref = lines[2].split()
2569    if xref[0].strip() == 'xllcorner':
2570        xllcorner = float(xref[1].strip()) # + 0.5*cellsize # Correct offset
2571    elif xref[0].strip() == 'xllcenter':
2572        xllcorner = float(xref[1].strip())
2573    else:
2574        msg = 'Unknown keyword: %s' %xref[0].strip()
2575        raise Exception, msg
2576
2577    yref = lines[3].split()
2578    if yref[0].strip() == 'yllcorner':
2579        yllcorner = float(yref[1].strip()) # + 0.5*cellsize # Correct offset
2580    elif yref[0].strip() == 'yllcenter':
2581        yllcorner = float(yref[1].strip())
2582    else:
2583        msg = 'Unknown keyword: %s' %yref[0].strip()
2584        raise Exception, msg
2585       
2586
2587    NODATA_value = int(lines[5].split()[1].strip())
2588
2589    assert len(lines) == nrows + 6
2590
2591
2592    ##########################################
2593
2594
2595    if basename_out == None:
2596        netcdfname = root + '.dem'
2597    else:
2598        netcdfname = basename_out + '.dem'
2599
2600    if verbose: print 'Store to NetCDF file %s' %netcdfname
2601    # NetCDF file definition
2602    fid = NetCDFFile(netcdfname, 'w')
2603
2604    #Create new file
2605    fid.institution = 'Geoscience Australia'
2606    fid.description = 'NetCDF DEM format for compact and portable storage ' +\
2607                      'of spatial point data'
2608
2609    fid.ncols = ncols
2610    fid.nrows = nrows
2611    fid.xllcorner = xllcorner
2612    fid.yllcorner = yllcorner
2613    fid.cellsize = cellsize
2614    fid.NODATA_value = NODATA_value
2615
2616    fid.zone = zone
2617    fid.false_easting = false_easting
2618    fid.false_northing = false_northing
2619    fid.projection = projection
2620    fid.datum = datum
2621    fid.units = units
2622
2623
2624    # dimension definitions
2625    fid.createDimension('number_of_rows', nrows)
2626    fid.createDimension('number_of_columns', ncols)
2627
2628    # variable definitions
2629    fid.createVariable('elevation', Float, ('number_of_rows',
2630                                            'number_of_columns'))
2631
2632    # Get handles to the variables
2633    elevation = fid.variables['elevation']
2634
2635    #Store data
2636    n = len(lines[6:])
2637    for i, line in enumerate(lines[6:]):
2638        fields = line.split()
2639        if verbose and i%((n+10)/10)==0:
2640            print 'Processing row %d of %d' %(i, nrows)
2641
2642        elevation[i, :] = array([float(x) for x in fields])
2643
2644    fid.close()
2645
2646
2647
2648
2649
2650def ferret2sww(basename_in, basename_out = None,
2651               verbose = False,
2652               minlat = None, maxlat = None,
2653               minlon = None, maxlon = None,
2654               mint = None, maxt = None, mean_stage = 0,
2655               origin = None, zscale = 1,
2656               fail_on_NaN = True,
2657               NaN_filler = 0,
2658               elevation = None,
2659               inverted_bathymetry = True
2660               ): #FIXME: Bathymetry should be obtained
2661                                  #from MOST somehow.
2662                                  #Alternatively from elsewhere
2663                                  #or, as a last resort,
2664                                  #specified here.
2665                                  #The value of -100 will work
2666                                  #for the Wollongong tsunami
2667                                  #scenario but is very hacky
2668    """Convert MOST and 'Ferret' NetCDF format for wave propagation to
2669    sww format native to abstract_2d_finite_volumes.
2670
2671    Specify only basename_in and read files of the form
2672    basefilename_ha.nc, basefilename_ua.nc, basefilename_va.nc containing
2673    relative height, x-velocity and y-velocity, respectively.
2674
2675    Also convert latitude and longitude to UTM. All coordinates are
2676    assumed to be given in the GDA94 datum.
2677
2678    min's and max's: If omitted - the full extent is used.
2679    To include a value min may equal it, while max must exceed it.
2680    Lat and lon are assumed to be in decimal degrees.
2681
2682    origin is a 3-tuple with geo referenced
2683    UTM coordinates (zone, easting, northing)
2684
2685    nc format has values organised as HA[TIME, LATITUDE, LONGITUDE]
2686    which means that longitude is the fastest
2687    varying dimension (row major order, so to speak)
2688
2689    ferret2sww uses grid points as vertices in a triangular grid
2690    counting vertices from lower left corner upwards, then right
2691    """
2692
2693    import os
2694    from Scientific.IO.NetCDF import NetCDFFile
2695    from Numeric import Float, Int, Int32, searchsorted, zeros, array
2696    from Numeric import allclose, around
2697
2698    precision = Float
2699
2700    msg = 'Must use latitudes and longitudes for minlat, maxlon etc'
2701
2702    if minlat != None:
2703        assert -90 < minlat < 90 , msg
2704    if maxlat != None:
2705        assert -90 < maxlat < 90 , msg
2706        if minlat != None:
2707            assert maxlat > minlat
2708    if minlon != None:
2709        assert -180 < minlon < 180 , msg
2710    if maxlon != None:
2711        assert -180 < maxlon < 180 , msg
2712        if minlon != None:
2713            assert maxlon > minlon
2714       
2715
2716
2717    #Get NetCDF data
2718    if verbose: print 'Reading files %s_*.nc' %basename_in
2719    #print "basename_in + '_ha.nc'",basename_in + '_ha.nc'
2720    file_h = NetCDFFile(basename_in + '_ha.nc', 'r') #Wave amplitude (cm)
2721    file_u = NetCDFFile(basename_in + '_ua.nc', 'r') #Velocity (x) (cm/s)
2722    file_v = NetCDFFile(basename_in + '_va.nc', 'r') #Velocity (y) (cm/s)
2723    file_e = NetCDFFile(basename_in + '_e.nc', 'r')  #Elevation (z) (m)
2724
2725    if basename_out is None:
2726        swwname = basename_in + '.sww'
2727    else:
2728        swwname = basename_out + '.sww'
2729
2730    # Get dimensions of file_h
2731    for dimension in file_h.dimensions.keys():
2732        if dimension[:3] == 'LON':
2733            dim_h_longitude = dimension
2734        if dimension[:3] == 'LAT':
2735            dim_h_latitude = dimension
2736        if dimension[:4] == 'TIME':
2737            dim_h_time = dimension
2738
2739#    print 'long:', dim_h_longitude
2740#    print 'lats:', dim_h_latitude
2741#    print 'times:', dim_h_time
2742
2743    times = file_h.variables[dim_h_time]
2744    latitudes = file_h.variables[dim_h_latitude]
2745    longitudes = file_h.variables[dim_h_longitude]
2746
2747    kmin, kmax, lmin, lmax = _get_min_max_indexes(latitudes[:],
2748                                                  longitudes[:],
2749                                                  minlat, maxlat,
2750                                                  minlon, maxlon)
2751    # get dimensions for file_e
2752    for dimension in file_e.dimensions.keys():
2753        if dimension[:3] == 'LON':
2754            dim_e_longitude = dimension
2755        if dimension[:3] == 'LAT':
2756            dim_e_latitude = dimension
2757
2758    # get dimensions for file_u
2759    for dimension in file_u.dimensions.keys():
2760        if dimension[:3] == 'LON':
2761            dim_u_longitude = dimension
2762        if dimension[:3] == 'LAT':
2763            dim_u_latitude = dimension
2764        if dimension[:4] == 'TIME':
2765            dim_u_time = dimension
2766           
2767    # get dimensions for file_v
2768    for dimension in file_v.dimensions.keys():
2769        if dimension[:3] == 'LON':
2770            dim_v_longitude = dimension
2771        if dimension[:3] == 'LAT':
2772            dim_v_latitude = dimension
2773        if dimension[:4] == 'TIME':
2774            dim_v_time = dimension
2775
2776
2777    # Precision used by most for lat/lon is 4 or 5 decimals
2778    e_lat = around(file_e.variables[dim_e_latitude][:], 5)
2779    e_lon = around(file_e.variables[dim_e_longitude][:], 5)
2780
2781    # Check that files are compatible
2782    assert allclose(latitudes, file_u.variables[dim_u_latitude])
2783    assert allclose(latitudes, file_v.variables[dim_v_latitude])
2784    assert allclose(latitudes, e_lat)
2785
2786    assert allclose(longitudes, file_u.variables[dim_u_longitude])
2787    assert allclose(longitudes, file_v.variables[dim_v_longitude])
2788    assert allclose(longitudes, e_lon)
2789
2790    if mint is None:
2791        jmin = 0
2792        mint = times[0]       
2793    else:
2794        jmin = searchsorted(times, mint)
2795       
2796    if maxt is None:
2797        jmax = len(times)
2798        maxt = times[-1]
2799    else:
2800        jmax = searchsorted(times, maxt)
2801
2802    #print "latitudes[:]",latitudes[:]
2803    #print "longitudes[:]",longitudes [:]
2804    kmin, kmax, lmin, lmax = _get_min_max_indexes(latitudes[:],
2805                                                  longitudes[:],
2806                                                  minlat, maxlat,
2807                                                  minlon, maxlon)
2808
2809
2810    times = times[jmin:jmax]
2811    latitudes = latitudes[kmin:kmax]
2812    longitudes = longitudes[lmin:lmax]
2813
2814    #print "latitudes[:]",latitudes[:]
2815    #print "longitudes[:]",longitudes [:]
2816
2817    if verbose: print 'cropping'
2818    zname = 'ELEVATION'
2819
2820    amplitudes = file_h.variables['HA'][jmin:jmax, kmin:kmax, lmin:lmax]
2821    uspeed = file_u.variables['UA'][jmin:jmax, kmin:kmax, lmin:lmax] #Lon
2822    vspeed = file_v.variables['VA'][jmin:jmax, kmin:kmax, lmin:lmax] #Lat
2823    elevations = file_e.variables[zname][kmin:kmax, lmin:lmax]
2824
2825    #    if latitudes2[0]==latitudes[0] and latitudes2[-1]==latitudes[-1]:
2826    #        elevations = file_e.variables['ELEVATION'][kmin:kmax, lmin:lmax]
2827    #    elif latitudes2[0]==latitudes[-1] and latitudes2[-1]==latitudes[0]:
2828    #        from Numeric import asarray
2829    #        elevations=elevations.tolist()
2830    #        elevations.reverse()
2831    #        elevations=asarray(elevations)
2832    #    else:
2833    #        from Numeric import asarray
2834    #        elevations=elevations.tolist()
2835    #        elevations.reverse()
2836    #        elevations=asarray(elevations)
2837    #        'print hmmm'
2838
2839
2840
2841    #Get missing values
2842    nan_ha = file_h.variables['HA'].missing_value[0]
2843    nan_ua = file_u.variables['UA'].missing_value[0]
2844    nan_va = file_v.variables['VA'].missing_value[0]
2845    if hasattr(file_e.variables[zname],'missing_value'):
2846        nan_e  = file_e.variables[zname].missing_value[0]
2847    else:
2848        nan_e = None
2849
2850    #Cleanup
2851    from Numeric import sometrue
2852
2853    missing = (amplitudes == nan_ha)
2854    if sometrue (missing):
2855        if fail_on_NaN:
2856            msg = 'NetCDFFile %s contains missing values'\
2857                  %(basename_in+'_ha.nc')
2858            raise DataMissingValuesError, msg
2859        else:
2860            amplitudes = amplitudes*(missing==0) + missing*NaN_filler
2861
2862    missing = (uspeed == nan_ua)
2863    if sometrue (missing):
2864        if fail_on_NaN:
2865            msg = 'NetCDFFile %s contains missing values'\
2866                  %(basename_in+'_ua.nc')
2867            raise DataMissingValuesError, msg
2868        else:
2869            uspeed = uspeed*(missing==0) + missing*NaN_filler
2870
2871    missing = (vspeed == nan_va)
2872    if sometrue (missing):
2873        if fail_on_NaN:
2874            msg = 'NetCDFFile %s contains missing values'\
2875                  %(basename_in+'_va.nc')
2876            raise DataMissingValuesError, msg
2877        else:
2878            vspeed = vspeed*(missing==0) + missing*NaN_filler
2879
2880
2881    missing = (elevations == nan_e)
2882    if sometrue (missing):
2883        if fail_on_NaN:
2884            msg = 'NetCDFFile %s contains missing values'\
2885                  %(basename_in+'_e.nc')
2886            raise DataMissingValuesError, msg
2887        else:
2888            elevations = elevations*(missing==0) + missing*NaN_filler
2889
2890    #######
2891
2892
2893
2894    number_of_times = times.shape[0]
2895    number_of_latitudes = latitudes.shape[0]
2896    number_of_longitudes = longitudes.shape[0]
2897
2898    assert amplitudes.shape[0] == number_of_times
2899    assert amplitudes.shape[1] == number_of_latitudes
2900    assert amplitudes.shape[2] == number_of_longitudes
2901
2902    if verbose:
2903        print '------------------------------------------------'
2904        print 'Statistics:'
2905        print '  Extent (lat/lon):'
2906        print '    lat in [%f, %f], len(lat) == %d'\
2907              %(min(latitudes.flat), max(latitudes.flat),
2908                len(latitudes.flat))
2909        print '    lon in [%f, %f], len(lon) == %d'\
2910              %(min(longitudes.flat), max(longitudes.flat),
2911                len(longitudes.flat))
2912        print '    t in [%f, %f], len(t) == %d'\
2913              %(min(times.flat), max(times.flat), len(times.flat))
2914
2915        q = amplitudes.flat
2916        name = 'Amplitudes (ha) [cm]'
2917        print '    %s in [%f, %f]' %(name, min(q), max(q))
2918
2919        q = uspeed.flat
2920        name = 'Speeds (ua) [cm/s]'
2921        print '    %s in [%f, %f]' %(name, min(q), max(q))
2922
2923        q = vspeed.flat
2924        name = 'Speeds (va) [cm/s]'
2925        print '    %s in [%f, %f]' %(name, min(q), max(q))
2926
2927        q = elevations.flat
2928        name = 'Elevations (e) [m]'
2929        print '    %s in [%f, %f]' %(name, min(q), max(q))
2930
2931
2932    # print number_of_latitudes, number_of_longitudes
2933    number_of_points = number_of_latitudes*number_of_longitudes
2934    number_of_volumes = (number_of_latitudes-1)*(number_of_longitudes-1)*2
2935
2936
2937    file_h.close()
2938    file_u.close()
2939    file_v.close()
2940    file_e.close()
2941
2942
2943    # NetCDF file definition
2944    outfile = NetCDFFile(swwname, 'w')
2945
2946    description = 'Converted from Ferret files: %s, %s, %s, %s'\
2947                  %(basename_in + '_ha.nc',
2948                    basename_in + '_ua.nc',
2949                    basename_in + '_va.nc',
2950                    basename_in + '_e.nc')
2951   
2952    # Create new file
2953    starttime = times[0]
2954   
2955    sww = Write_sww()
2956    sww.store_header(outfile, times, number_of_volumes,
2957                     number_of_points, description=description,
2958                     verbose=verbose,sww_precision=Float)
2959
2960    # Store
2961    from anuga.coordinate_transforms.redfearn import redfearn
2962    x = zeros(number_of_points, Float)  #Easting
2963    y = zeros(number_of_points, Float)  #Northing
2964
2965
2966    if verbose: print 'Making triangular grid'
2967
2968    # Check zone boundaries
2969    refzone, _, _ = redfearn(latitudes[0],longitudes[0])
2970
2971    vertices = {}
2972    i = 0
2973    for k, lat in enumerate(latitudes):       #Y direction
2974        for l, lon in enumerate(longitudes):  #X direction
2975
2976            vertices[l,k] = i
2977
2978            zone, easting, northing = redfearn(lat,lon)
2979
2980            msg = 'Zone boundary crossed at longitude = %f' %lon
2981            #assert zone == refzone, msg
2982            #print '%7.2f %7.2f %8.2f %8.2f' %(lon, lat, easting, northing)
2983            x[i] = easting
2984            y[i] = northing
2985            i += 1
2986
2987    #Construct 2 triangles per 'rectangular' element
2988    volumes = []
2989    for l in range(number_of_longitudes-1):    #X direction
2990        for k in range(number_of_latitudes-1): #Y direction
2991            v1 = vertices[l,k+1]
2992            v2 = vertices[l,k]
2993            v3 = vertices[l+1,k+1]
2994            v4 = vertices[l+1,k]
2995
2996            volumes.append([v1,v2,v3]) #Upper element
2997            volumes.append([v4,v3,v2]) #Lower element
2998
2999    volumes = array(volumes)
3000
3001    if origin is None:
3002        origin = Geo_reference(refzone,min(x),min(y))
3003    geo_ref = write_NetCDF_georeference(origin, outfile)
3004   
3005    if elevation is not None:
3006        z = elevation
3007    else:
3008        if inverted_bathymetry:
3009            z = -1*elevations
3010        else:
3011            z = elevations
3012    #FIXME: z should be obtained from MOST and passed in here
3013
3014    #FIXME use the Write_sww instance(sww) to write this info
3015    from Numeric import resize
3016    z = resize(z,outfile.variables['z'][:].shape)
3017    outfile.variables['x'][:] = x - geo_ref.get_xllcorner()
3018    outfile.variables['y'][:] = y - geo_ref.get_yllcorner()
3019    outfile.variables['z'][:] = z             #FIXME HACK for backwards compat.
3020    outfile.variables['elevation'][:] = z
3021    outfile.variables['volumes'][:] = volumes.astype(Int32) #For Opteron 64
3022
3023
3024
3025    #Time stepping
3026    stage = outfile.variables['stage']
3027    xmomentum = outfile.variables['xmomentum']
3028    ymomentum = outfile.variables['ymomentum']
3029
3030    if verbose: print 'Converting quantities'
3031    n = len(times)
3032    for j in range(n):
3033        if verbose and j%((n+10)/10)==0: print '  Doing %d of %d' %(j, n)
3034        i = 0
3035        for k in range(number_of_latitudes):      #Y direction
3036            for l in range(number_of_longitudes): #X direction
3037                w = zscale*amplitudes[j,k,l]/100 + mean_stage
3038                stage[j,i] = w
3039                h = w - z[i]
3040                xmomentum[j,i] = uspeed[j,k,l]/100*h
3041                ymomentum[j,i] = vspeed[j,k,l]/100*h
3042                i += 1
3043
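    # Note: HA is in cm and UA/VA in cm/s, hence the divisions by 100
    # above; momenta are depth-integrated velocities, i.e. speed [m/s]
    # times water depth h [m].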
3044    #outfile.close()
3045
3046    #FIXME: Refactor using code from file_function.statistics
3047    #Something like print swwstats(swwname)
3048    if verbose:
3049        x = outfile.variables['x'][:]
3050        y = outfile.variables['y'][:]
3051        print '------------------------------------------------'
3052        print 'Statistics of output file:'
3053        print '  Name: %s' %swwname
3054        print '  Reference:'
3055        print '    Lower left corner: [%f, %f]'\
3056              %(geo_ref.get_xllcorner(), geo_ref.get_yllcorner())
3057        print '    Start time: %f' %starttime
3058        print '    Min time: %f' %mint
3059        print '    Max time: %f' %maxt
3060        print '  Extent:'
3061        print '    x [m] in [%f, %f], len(x) == %d'\
3062              %(min(x.flat), max(x.flat), len(x.flat))
3063        print '    y [m] in [%f, %f], len(y) == %d'\
3064              %(min(y.flat), max(y.flat), len(y.flat))
3065        print '    t [s] in [%f, %f], len(t) == %d'\
3066              %(min(times), max(times), len(times))
3067        print '  Quantities [SI units]:'
3068        for name in ['stage', 'xmomentum', 'ymomentum', 'elevation']:
3069            q = outfile.variables[name][:].flat
3070            print '    %s in [%f, %f]' %(name, min(q), max(q))
3071
3072
3073
3074    outfile.close()
3075
3076
3077
3078
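# The triangulation used by ferret2sww can be illustrated in isolation.
# The helper below is a sketch (not used elsewhere in this module); it
# reproduces the vertex numbering and the two-triangles-per-rectangle
# construction for a structured grid.
def _example_grid_triangulation(number_of_latitudes, number_of_longitudes):
    """Return the triangles ferret2sww would build for a structured
    grid, as a list of 3-element lists of vertex indices. Vertices are
    numbered row by row from the lower left corner.
    """
    vertices = {}
    i = 0
    for k in range(number_of_latitudes):          # Y direction
        for l in range(number_of_longitudes):     # X direction
            vertices[l,k] = i
            i += 1

    volumes = []
    for l in range(number_of_longitudes-1):       # X direction
        for k in range(number_of_latitudes-1):    # Y direction
            v1 = vertices[l,k+1]
            v2 = vertices[l,k]
            v3 = vertices[l+1,k+1]
            v4 = vertices[l+1,k]

            volumes.append([v1,v2,v3])  # Upper triangle
            volumes.append([v4,v3,v2])  # Lower triangle

    # E.g. 3 latitudes x 4 longitudes gives 12 vertices and
    # (3-1)*(4-1)*2 == 12 triangles.
    return volumes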
3079
3080def timefile2netcdf(filename, quantity_names=None, time_as_seconds=False):
3081    """Template for converting typical text files with time series to
3082    NetCDF tms file.
3083
3084
3085    The file format is assumed to be either two fields separated by a comma:
3086
3087        time [DD/MM/YY hh:mm:ss], value0 value1 value2 ...
3088
3089    E.g
3090
3091      31/08/04 00:00:00, 1.328223 0 0
3092      31/08/04 00:15:00, 1.292912 0 0
3093
3094    or time (seconds), value0 value1 value2 ...
3095   
3096      0.0, 1.328223 0 0
3097      0.1, 1.292912 0 0
3098
3099    will provide a time dependent function f(t) with three attributes
3100
3101    filename is assumed to be the rootname with extensions .txt and .tms
3102    """
3103
3104    import time, calendar
3105    from Numeric import array
3106    from anuga.config import time_format
3107    from anuga.utilities.numerical_tools import ensure_numeric
3108
3109
3110
3111    fid = open(filename + '.txt')
3112    line = fid.readline()
3113    fid.close()
3114
3115    fields = line.split(',')
3116    msg = 'File %s must have the format: date, value0 value1 value2 ...' %filename
3117    assert len(fields) == 2, msg
3118
3119    if not time_as_seconds:
3120        try:
3121            starttime = calendar.timegm(time.strptime(fields[0], time_format))
3122        except ValueError:
3123            msg = 'First field in file %s must be' %filename
3124            msg += ' date-time with format %s.\n' %time_format
3125            msg += 'I got %s instead.' %fields[0]
3126            raise DataTimeError, msg
3127    else:
3128        try:
3129            starttime = float(fields[0])
3130        except ValueError:
3131            msg = "Bad time format"
3132            raise DataTimeError, msg
3133
3134
3135    #Split values
3136    values = []
3137    for value in fields[1].split():
3138        values.append(float(value))
3139
3140    q = ensure_numeric(values)
3141
3142    msg = 'ERROR: File must contain at least one independent value'
3143    assert len(q.shape) == 1, msg
3144
3145
3146
3147    #Read times proper
3148    from Numeric import zeros, Float, alltrue
3149    from anuga.config import time_format
3150    import time, calendar
3151
3152    fid = open(filename + '.txt')
3153    lines = fid.readlines()
3154    fid.close()
3155
3156    N = len(lines)
3157    d = len(q)
3158
3159    T = zeros(N, Float)       #Time
3160    Q = zeros((N, d), Float)  #Values
3161
3162    for i, line in enumerate(lines):
3163        fields = line.split(',')
3164        if not time_as_seconds:
3165            realtime = calendar.timegm(time.strptime(fields[0], time_format))
3166        else:
3167            realtime = float(fields[0])
3168        T[i] = realtime - starttime
3169
3170        for j, value in enumerate(fields[1].split()):
3171            Q[i, j] = float(value)
3172
3173    msg = 'File %s must list time as a monotonically ' %filename
3174    msg += 'increasing sequence'
3175    assert alltrue( T[1:] - T[:-1] > 0 ), msg
3176
3177    #Create NetCDF file
3178    from Scientific.IO.NetCDF import NetCDFFile
3179
3180    fid = NetCDFFile(filename + '.tms', 'w')
3181
3182
3183    fid.institution = 'Geoscience Australia'
3184    fid.description = 'Time series'
3185
3186
3187    #Reference point
3188    #Start time in seconds since the epoch (midnight 1/1/1970)
3189    #FIXME: Use Georef
3190    fid.starttime = starttime
3191
3192    # dimension definitions
3193    #fid.createDimension('number_of_volumes', self.number_of_volumes)
3194    #fid.createDimension('number_of_vertices', 3)
3195
3196
3197    fid.createDimension('number_of_timesteps', len(T))
3198
3199    fid.createVariable('time', Float, ('number_of_timesteps',))
3200
3201    fid.variables['time'][:] = T
3202
3203    for i in range(Q.shape[1]):
3204        try:
3205            name = quantity_names[i]
3206        except (TypeError, IndexError):
3207            name = 'Attribute%d'%i
3208
3209        fid.createVariable(name, Float, ('number_of_timesteps',))
3210        fid.variables[name][:] = Q[:,i]
3211
3212    fid.close()
3213
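# Example (a sketch, not part of the original module): converting a gauge
# time series to .tms with the conversion function above (timefile2netcdf).
# 'gauge' is a hypothetical rootname; 'gauge.txt' would contain lines such
# as '31/08/04 00:00:00, 1.328223 0 0'.
#
#     timefile2netcdf('gauge', quantity_names=['stage', 'xmomentum', 'ymomentum'])
#
# This reads 'gauge.txt' and writes 'gauge.tms' with one NetCDF variable per
# value column, all along the 'number_of_timesteps' dimension.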
3214
3215def extent_sww(file_name):
3216    """
3217    Read in an sww file and return its extent.
3218
3219    Input:
3220    file_name - the sww file
3221
3222    Output:
3223    A list [min(x), max(x), min(y), max(y),
3224    min(stage), max(stage)], i.e. the bounding box
3225    of the mesh vertices and the range of stage
3226    values over all timesteps and vertices.
3227
3228    """
3229
3230
3231    from Scientific.IO.NetCDF import NetCDFFile
3232
3233    #Check contents
3234    #Get NetCDF
3235    fid = NetCDFFile(file_name, 'r')
3236
3237    # Get the variables
3238    x = fid.variables['x'][:]
3239    y = fid.variables['y'][:]
3240    stage = fid.variables['stage'][:]
3245
3246    fid.close()
3247
3248    return [min(x),max(x),min(y),max(y),min(stage.flat),max(stage.flat)]
3249
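# Example (a sketch): using extent_sww to get a bounding box for plotting.
# 'run.sww' is a hypothetical file name.
#
#     xmin, xmax, ymin, ymax, stage_min, stage_max = extent_sww('run.sww')
#     print 'Model area: %.1f m x %.1f m' % (xmax - xmin, ymax - ymin)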
3250
3251def sww2domain(filename, boundary=None, t=None,
3252               fail_if_NaN=True ,NaN_filler=0,
3253               verbose = False, very_verbose = False):
3254    """
3255    Usage: domain = sww2domain('file.sww', t=time)  (default: last time in file)
3256
3257    Passing a boundary is not recommended if domain.smooth is not selected,
3258    as smoothing uses unique coordinates, but not unique boundaries. This
3259    means that the boundary file will not be compatible with the coordinates,
3260    and will give a different final boundary, or crash.
3261    """
3262    NaN=9.969209968386869e+036
3263    #initialise NaN.
3264
3265    from Scientific.IO.NetCDF import NetCDFFile
3266    from shallow_water import Domain
3267    from Numeric import asarray, transpose, resize
3268
3269    if verbose: print 'Reading from ', filename
3270    fid = NetCDFFile(filename, 'r')    #Open existing file for read
3271    time = fid.variables['time']       #Timesteps
3272    if t is None:
3273        t = time[-1]
3274    time_interp = get_time_interp(time,t)
3275
3276    # Get the variables as Numeric arrays
3277    x = fid.variables['x'][:]             #x-coordinates of vertices
3278    y = fid.variables['y'][:]             #y-coordinates of vertices
3279    elevation = fid.variables['elevation']     #Elevation
3280    stage = fid.variables['stage']     #Water level
3281    xmomentum = fid.variables['xmomentum']   #Momentum in the x-direction
3282    ymomentum = fid.variables['ymomentum']   #Momentum in the y-direction
3283
3284    starttime = fid.starttime[0]
3285    volumes = fid.variables['volumes'][:] #Connectivity
3286    coordinates = transpose(asarray([x.tolist(),y.tolist()]))
3287    #FIXME (Ole): Something like this might be better: concatenate( (x, y), axis=1 )
3288    # or concatenate( (x[:,NewAxis],x[:,NewAxis]), axis=1 )
3289
3290    conserved_quantities = []
3291    interpolated_quantities = {}
3292    other_quantities = []
3293
3294    # get geo_reference
3295    #sww files don't have to have a geo_ref
3296    try:
3297        geo_reference = Geo_reference(NetCDFObject=fid)
3298    except AttributeError:
3299        geo_reference = None
3300
3301    if verbose: print '    getting quantities'
3302    for quantity in fid.variables.keys():
3303        dimensions = fid.variables[quantity].dimensions
3304        if 'number_of_timesteps' in dimensions:
3305            conserved_quantities.append(quantity)
3306            interpolated_quantities[quantity]=\
3307                  interpolated_quantity(fid.variables[quantity][:],time_interp)
3308        else: other_quantities.append(quantity)
3309
3310    other_quantities.remove('x')
3311    other_quantities.remove('y')
3312    other_quantities.remove('z')
3313    other_quantities.remove('volumes')
3314    try:
3315        other_quantities.remove('stage_range')
3316        other_quantities.remove('xmomentum_range')
3317        other_quantities.remove('ymomentum_range')
3318        other_quantities.remove('elevation_range')
3319    except:
3320        pass
3321       
3322
3323    conserved_quantities.remove('time')
3324
3325    if verbose: print '    building domain'
3326    #    From domain.Domain:
3327    #    domain = Domain(coordinates, volumes,\
3328    #                    conserved_quantities = conserved_quantities,\
3329    #                    other_quantities = other_quantities,zone=zone,\
3330    #                    xllcorner=xllcorner, yllcorner=yllcorner)
3331
3332    #   From shallow_water.Domain:
3333    coordinates=coordinates.tolist()
3334    volumes=volumes.tolist()
3335    #FIXME:should this be in mesh?(peter row)
3336    if fid.smoothing == 'Yes': unique = False
3337    else: unique = True
3338    if unique:
3339        coordinates,volumes,boundary=weed(coordinates,volumes,boundary)
3340
3341
3342    try:
3343        domain = Domain(coordinates, volumes, boundary)
3344    except AssertionError, e:
3345        fid.close()
3346        msg = 'Domain could not be created: %s. Perhaps use "fail_if_NaN=False and NaN_filler = ..."' %e
3347        raise DataDomainError, msg
3348
3349    if not boundary is None:
3350        domain.boundary = boundary
3351
3352    domain.geo_reference = geo_reference
3353
3354    domain.starttime=float(starttime)+float(t)
3355    domain.time=0.0
3356
3357    for quantity in other_quantities:
3358        try:
3359            NaN = fid.variables[quantity].missing_value
3360        except:
3361            pass #quantity has no missing_value number
3362        X = fid.variables[quantity][:]
3363        if very_verbose:
3364            print '       ',quantity
3365            print '        NaN =',NaN
3366            print '        max(X)'
3367            print '       ',max(X)
3368            print '        max(X)==NaN'
3369            print '       ',max(X)==NaN
3370            print ''
3371        if (max(X)==NaN) or (min(X)==NaN):
3372            if fail_if_NaN:
3373                msg = 'quantity "%s" contains no_data entry'%quantity
3374                raise DataMissingValuesError, msg
3375            else:
3376                data = (X != NaN)
3377                X = (X*data)+(data==0)*NaN_filler
3378        if unique:
3379            X = resize(X,(len(X)/3,3))
3380        domain.set_quantity(quantity,X)
3381    #
3382    for quantity in conserved_quantities:
3383        try:
3384            NaN = fid.variables[quantity].missing_value
3385        except:
3386            pass #quantity has no missing_value number
3387        X = interpolated_quantities[quantity]
3388        if very_verbose:
3389            print '       ',quantity
3390            print '        NaN =',NaN
3391            print '        max(X)'
3392            print '       ',max(X)
3393            print '        max(X)==NaN'
3394            print '       ',max(X)==NaN
3395            print ''
3396        if (max(X)==NaN) or (min(X)==NaN):
3397            if fail_if_NaN:
3398                msg = 'quantity "%s" contains no_data entry'%quantity
3399                raise DataMissingValuesError, msg
3400            else:
3401                data = (X != NaN)
3402                X = (X*data)+(data==0)*NaN_filler
3403        if unique:
3404            X = resize(X,(X.shape[0]/3,3))
3405        domain.set_quantity(quantity,X)
3406
3407    fid.close()
3408    return domain
3409
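# Example (a sketch): rebuilding a domain from a stored sww file, e.g. to
# continue a simulation from the last stored timestep.  'run.sww' is a
# hypothetical file name.
#
#     domain = sww2domain('run.sww', fail_if_NaN=False, NaN_filler=0,
#                         verbose=True)
#     domain.set_name('run_continued')
#     # domain.starttime is advanced to starttime + t, so subsequent
#     # evolution continues from the interpolated state.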
3410
3411def interpolated_quantity(saved_quantity,time_interp):
3412
3413    #given an index and ratio, interpolate quantity with respect to time.
3414    index,ratio = time_interp
3415    Q = saved_quantity
3416    if ratio > 0:
3417        q = (1-ratio)*Q[index]+ ratio*Q[index+1]
3418    else:
3419        q = Q[index]
3420    #Return vector of interpolated values
3421    return q
3422
3423
3424def get_time_interp(time,t=None):
3425    #Finds the ratio and index for time interpolation.
3426    #It is borrowed from previous abstract_2d_finite_volumes code.
3427    if t is None:
3428        t=time[-1]
3429        index = -1
3430        ratio = 0.
3431    else:
3432        T = time
3433        tau = t
3434        index=0
3435        msg = 'Time interval derived from file %s [%s:%s]'\
3436            %('FIXMEfilename', T[0], T[-1])
3437        msg += ' does not match model time: %s' %tau
3438        if tau < time[0]: raise DataTimeError, msg
3439        if tau > time[-1]: raise DataTimeError, msg
3440        while tau > time[index]: index += 1
3441        while tau < time[index]: index -= 1
3442        if tau == time[index]:
3443            #Protect against case where tau == time[-1] (last time)
3444            # - also works in general when tau == time[i]
3445            ratio = 0
3446        else:
3447            #t is now between index and index+1
3448            ratio = (tau - time[index])/(time[index+1] - time[index])
3449    return (index,ratio)
3450
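# Worked example (not part of the original module): for time = [0., 10., 20.]
# and t = 15., get_time_interp returns (index=1, ratio=0.5).
# interpolated_quantity then blends the two bracketing time slices:
#
#     index, ratio = get_time_interp([0., 10., 20.], t=15.)  # -> (1, 0.5)
#     # q = (1 - ratio)*Q[1] + ratio*Q[2], i.e. halfway between the slices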
3451
3452def weed(coordinates,volumes,boundary = None):
3453    if type(coordinates)==ArrayType:
3454        coordinates = coordinates.tolist()
3455    if type(volumes)==ArrayType:
3456        volumes = volumes.tolist()
3457
3458    unique = False
3459    point_dict = {}
3460    same_point = {}
3461    for i in range(len(coordinates)):
3462        point = tuple(coordinates[i])
3463        if point_dict.has_key(point):
3464            unique = True
3465            same_point[i]=point
3466            #to change all point i references to point j
3467        else:
3468            point_dict[point]=i
3469            same_point[i]=point
3470
3471    coordinates = []
3472    i = 0
3473    for point in point_dict.keys():
3474        point = tuple(point)
3475        coordinates.append(list(point))
3476        point_dict[point]=i
3477        i+=1
3478
3479
3480    for volume in volumes:
3481        for i in range(len(volume)):
3482            index = volume[i]
3483            if index>-1:
3484                volume[i]=point_dict[same_point[index]]
3485
3486    new_boundary = {}
3487    if not boundary is None:
3488        for segment in boundary.keys():
3489            point0 = point_dict[same_point[segment[0]]]
3490            point1 = point_dict[same_point[segment[1]]]
3491            label = boundary[segment]
3492            #FIXME should the boundary attributes be concatenated
3493            #('exterior, pond') or replaced ('pond')(peter row)
3494
3495            if new_boundary.has_key((point0,point1)):
3496                new_boundary[(point0,point1)]=new_boundary[(point0,point1)]#\
3497                                              #+','+label
3498
3499            elif new_boundary.has_key((point1,point0)):
3500                new_boundary[(point1,point0)]=new_boundary[(point1,point0)]#\
3501                                              #+','+label
3502            else: new_boundary[(point0,point1)]=label
3503
3504        boundary = new_boundary
3505
3506    return coordinates,volumes,boundary
3507
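# Example (a sketch): weeding duplicated vertices out of an unsmoothed mesh.
# Two triangles sharing an edge are stored with 6 vertices; weed maps them
# onto the 4 unique coordinates and renumbers the volumes accordingly.
#
#     coords = [[0,0], [1,0], [0,1], [1,0], [1,1], [0,1]]
#     vols = [[0,1,2], [3,4,5]]
#     coords, vols, boundary = weed(coords, vols)
#     # len(coords) == 4 and the two volumes now share vertex indices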
3508
3509def decimate_dem(basename_in, stencil, cellsize_new, basename_out=None,
3510                 verbose=False):
3511    """Read Digital Elevation Model from the following NetCDF format (.dem)
3512
3513    Example:
3514
3515    ncols         3121
3516    nrows         1800
3517    xllcorner     722000
3518    yllcorner     5893000
3519    cellsize      25
3520    NODATA_value  -9999
3521    138.3698 137.4194 136.5062 135.5558 ..........
3522
3523    Decimate data to cellsize_new using stencil and write to NetCDF dem format.
3524    """
3525
3526    import os
3527    from Scientific.IO.NetCDF import NetCDFFile
3528    from Numeric import Float, zeros, sum, reshape, equal
3529
3530    root = basename_in
3531    inname = root + '.dem'
3532
3533    #Open existing netcdf file to read
3534    infile = NetCDFFile(inname, 'r')
3535    if verbose: print 'Reading DEM from %s' %inname
3536
3537    #Read metadata
3538    ncols = infile.ncols[0]
3539    nrows = infile.nrows[0]
3540    xllcorner = infile.xllcorner[0]
3541    yllcorner = infile.yllcorner[0]
3542    cellsize = infile.cellsize[0]
3543    NODATA_value = infile.NODATA_value[0]
3544    zone = infile.zone[0]
3545    false_easting = infile.false_easting[0]
3546    false_northing = infile.false_northing[0]
3547    projection = infile.projection
3548    datum = infile.datum
3549    units = infile.units
3550
3551    dem_elevation = infile.variables['elevation']
3552
3553    #Get output file name
3554    if basename_out == None:
3555        outname = root + '_' + repr(cellsize_new) + '.dem'
3556    else:
3557        outname = basename_out + '.dem'
3558
3559    if verbose: print 'Write decimated NetCDF file to %s' %outname
3560
3561    #Determine some dimensions for decimated grid
3562    (nrows_stencil, ncols_stencil) = stencil.shape
3563    x_offset = ncols_stencil / 2
3564    y_offset = nrows_stencil / 2
3565    cellsize_ratio = int(cellsize_new / cellsize)
3566    ncols_new = 1 + (ncols - ncols_stencil) / cellsize_ratio
3567    nrows_new = 1 + (nrows - nrows_stencil) / cellsize_ratio
3568
3569    #Open netcdf file for output
3570    outfile = NetCDFFile(outname, 'w')
3571
3572    #Create new file
3573    outfile.institution = 'Geoscience Australia'
3574    outfile.description = 'NetCDF DEM format for compact and portable storage ' +\
3575                           'of spatial point data'
3576    #Georeferencing
3577    outfile.zone = zone
3578    outfile.projection = projection
3579    outfile.datum = datum
3580    outfile.units = units
3581
3582    outfile.cellsize = cellsize_new
3583    outfile.NODATA_value = NODATA_value
3584    outfile.false_easting = false_easting
3585    outfile.false_northing = false_northing
3586
3587    outfile.xllcorner = xllcorner + (x_offset * cellsize)
3588    outfile.yllcorner = yllcorner + (y_offset * cellsize)
3589    outfile.ncols = ncols_new
3590    outfile.nrows = nrows_new
3591
3592    # dimension definition
3593    outfile.createDimension('number_of_points', nrows_new*ncols_new)
3594
3595    # variable definition
3596    outfile.createVariable('elevation', Float, ('number_of_points',))
3597
3598    # Get handle to the variable
3599    elevation = outfile.variables['elevation']
3600
3601    dem_elevation_r = reshape(dem_elevation, (nrows, ncols))
3602
3603    #Store data
3604    global_index = 0
3605    for i in range(nrows_new):
3606        if verbose: print 'Processing row %d of %d' %(i, nrows_new)
3607        lower_index = global_index
3608        telev =  zeros(ncols_new, Float)
3609        local_index = 0
3610        trow = i * cellsize_ratio
3611
3612        for j in range(ncols_new):
3613            tcol = j * cellsize_ratio
3614            tmp = dem_elevation_r[trow:trow+nrows_stencil, tcol:tcol+ncols_stencil]
3615
3616            #if dem contains 1 or more NODATA_values set value in
3617            #decimated dem to NODATA_value, else compute decimated
3618            #value using stencil
3619            if sum(sum(equal(tmp, NODATA_value))) > 0:
3620                telev[local_index] = NODATA_value
3621            else:
3622                telev[local_index] = sum(sum(tmp * stencil))
3623
3624            global_index += 1
3625            local_index += 1
3626
3627        upper_index = global_index
3628
3629        elevation[lower_index:upper_index] = telev
3630
3631    assert global_index == nrows_new*ncols_new, 'index not equal to number of points'
3632
3633    infile.close()
3634    outfile.close()
3635
3636
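# Example (a sketch): decimating a 25 m DEM to 100 m with a 4x4 averaging
# stencil.  The stencil shape should match the cellsize ratio, here
# int(cellsize_new/cellsize) == 4.  'topo' is a hypothetical basename for
# an existing 'topo.dem' file.
#
#     from Numeric import ones, Float
#     stencil = ones((4, 4), Float)/16.0   # plain 4x4 block average
#     decimate_dem('topo', stencil, 100, basename_out='topo_100m')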
3637
3638
3639def tsh2sww(filename, verbose=False): 
3640    """
3641    Convert a tsh/msh mesh file to sww format, e.g. to check whether a mesh file 'looks' good.
3642    """
3643
3644
3645    if verbose: print 'Creating domain from', filename
3646    domain = pmesh_to_domain_instance(filename, Domain)
3647    if verbose: print "Number of triangles = ", len(domain)
3648
3649    domain.smooth = True
3650    domain.format = 'sww'   #Native netcdf visualisation format
3651    file_path, filename = path.split(filename)
3652    filename, ext = path.splitext(filename)
3653    domain.set_name(filename)   
3654    domain.reduction = mean
3655    if verbose: print "file_path", file_path
3656    if file_path == "": file_path = "."
3657    domain.set_datadir(file_path)
3658
3659    if verbose:
3660        print "Output written to " + domain.get_datadir() + sep + \
3661              domain.get_name() + "." + domain.format
3662    sww = get_dataobject(domain)
3663    sww.store_connectivity()
3664    sww.store_timestep()
3665
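# Example (a sketch): quick visual check of a mesh file by converting it to
# sww.  'mesh.tsh' is a hypothetical file; the output 'mesh.sww' is written
# to the same directory.
#
#     tsh2sww('mesh.tsh', verbose=True)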
3666
3667def asc_csiro2sww(bath_dir,
3668                  elevation_dir,
3669                  ucur_dir,
3670                  vcur_dir,
3671                  sww_file,
3672                  minlat = None, maxlat = None,
3673                  minlon = None, maxlon = None,
3674                  zscale=1,
3675                  mean_stage = 0,
3676                  fail_on_NaN = True,
3677                  elevation_NaN_filler = 0,
3678                  bath_prefix='ba',
3679                  elevation_prefix='el',
3680                  verbose=False):
3681    """
3682    Produce an sww boundary file from esri ascii data from CSIRO.
3683
3684    Also convert latitude and longitude to UTM. All coordinates are
3685    assumed to be given in the GDA94 datum.
3686
3687    assume:
3688    All files are in esri ascii format
3689
3690    4 types of information
3691    bathymetry
3692    elevation
3693    u velocity
3694    v velocity
3695
3696    Assumptions:
3697    The metadata of all the files is the same.
3698    Each type is in a separate directory.
3699    There is one bath file, with extension .000.
3700    The time period is less than 24hrs and uniform.
3701    """
3702    from Scientific.IO.NetCDF import NetCDFFile
3703
3704    from anuga.coordinate_transforms.redfearn import redfearn
3705
3706    precision = Float # So if we want to change the precision it's done here
3707
3708    # go into the bath dir and load the only file
3709    bath_files = os.listdir(bath_dir)
3710
3711    bath_file = bath_files[0]
3712    bath_dir_file =  bath_dir + os.sep + bath_file
3713    bath_metadata,bath_grid =  _read_asc(bath_dir_file)
3714
3715    #Use the date.time of the bath file as a basis for
3716    #the start time for other files
3717    base_start = bath_file[-12:]
3718
3719    #go into the elevation dir and load the 000 file
3720    elevation_dir_file = elevation_dir  + os.sep + elevation_prefix \
3721                         + base_start
3722
3723    elevation_files = os.listdir(elevation_dir)
3724    ucur_files = os.listdir(ucur_dir)
3725    vcur_files = os.listdir(vcur_dir)
3726    elevation_files.sort()
3727    # the first elevation file should be the
3728    # file with the same base name as the bath data
3729    assert elevation_files[0] == 'el' + base_start
3730
3731    number_of_latitudes = bath_grid.shape[0]
3732    number_of_longitudes = bath_grid.shape[1]
3733    number_of_volumes = (number_of_latitudes-1)*(number_of_longitudes-1)*2
3734
3735    longitudes = [bath_metadata['xllcorner']+x*bath_metadata['cellsize'] \
3736                  for x in range(number_of_longitudes)]
3737    latitudes = [bath_metadata['yllcorner']+y*bath_metadata['cellsize'] \
3738                 for y in range(number_of_latitudes)]
3739
3740    # reverse order of lat, so the first lat represents the first grid row
3741    latitudes.reverse()
3742
3743    kmin, kmax, lmin, lmax = _get_min_max_indexes(latitudes[:],longitudes[:],
3744                                                 minlat=minlat, maxlat=maxlat,
3745                                                 minlon=minlon, maxlon=maxlon)
3746
3747
3748    bath_grid = bath_grid[kmin:kmax,lmin:lmax]
3749    latitudes = latitudes[kmin:kmax]
3750    longitudes = longitudes[lmin:lmax]
3751    number_of_latitudes = len(latitudes)
3752    number_of_longitudes = len(longitudes)
3753    number_of_times = len(os.listdir(elevation_dir))
3754    number_of_points = number_of_latitudes*number_of_longitudes
3755    number_of_volumes = (number_of_latitudes-1)*(number_of_longitudes-1)*2
3756
3757    #Work out the times
3758    if len(elevation_files) > 1:
3759        # Assume: The time period is less than 24hrs.
3760        time_period = (int(elevation_files[1][-3:]) - \
3761                      int(elevation_files[0][-3:]))*60*60
3762        times = [x*time_period for x in range(len(elevation_files))]
3763    else:
3764        times = [0.0]
3765
3766
3767    if verbose:
3768        print '------------------------------------------------'
3769        print 'Statistics:'
3770        print '  Extent (lat/lon):'
3771        print '    lat in [%f, %f], len(lat) == %d'\
3772              %(min(latitudes), max(latitudes),
3773                len(latitudes))
3774        print '    lon in [%f, %f], len(lon) == %d'\
3775              %(min(longitudes), max(longitudes),
3776                len(longitudes))
3777        print '    t in [%f, %f], len(t) == %d'\
3778              %(min(times), max(times), len(times))
3779
3780    ######### WRITE THE SWW FILE #############
3781    # NetCDF file definition
3782    outfile = NetCDFFile(sww_file, 'w')
3783
3784    #Create new file
3785    outfile.institution = 'Geoscience Australia'
3786    outfile.description = 'Converted from XXX'
3787
3788
3789    #For sww compatibility
3790    outfile.smoothing = 'Yes'
3791    outfile.order = 1
3792
3793    #Start time in seconds since the epoch (midnight 1/1/1970)
3794    outfile.starttime = starttime = times[0]
3795
3796
3797    # dimension definitions
3798    outfile.createDimension('number_of_volumes', number_of_volumes)
3799
3800    outfile.createDimension('number_of_vertices', 3)
3801    outfile.createDimension('number_of_points', number_of_points)
3802    outfile.createDimension('number_of_timesteps', number_of_times)
3803
3804    # variable definitions
3805    outfile.createVariable('x', precision, ('number_of_points',))
3806    outfile.createVariable('y', precision, ('number_of_points',))
3807    outfile.createVariable('elevation', precision, ('number_of_points',))
3808
3809    #FIXME: Backwards compatibility
3810    outfile.createVariable('z', precision, ('number_of_points',))
3811    #################################
3812
3813    outfile.createVariable('volumes', Int, ('number_of_volumes',
3814                                            'number_of_vertices'))
3815
3816    outfile.createVariable('time', precision,
3817                           ('number_of_timesteps',))
3818
3819    outfile.createVariable('stage', precision,
3820                           ('number_of_timesteps',
3821                            'number_of_points'))
3822
3823    outfile.createVariable('xmomentum', precision,
3824                           ('number_of_timesteps',
3825                            'number_of_points'))
3826
3827    outfile.createVariable('ymomentum', precision,
3828                           ('number_of_timesteps',
3829                            'number_of_points'))
3830
3831    #Store
3832    from anuga.coordinate_transforms.redfearn import redfearn
3833    x = zeros(number_of_points, Float)  #Easting
3834    y = zeros(number_of_points, Float)  #Northing
3835
3836    if verbose: print 'Making triangular grid'
3837    #Get zone of 1st point.
3838    refzone, _, _ = redfearn(latitudes[0],longitudes[0])
3839
3840    vertices = {}
3841    i = 0
3842    for k, lat in enumerate(latitudes):
3843        for l, lon in enumerate(longitudes):
3844
3845            vertices[l,k] = i
3846
3847            zone, easting, northing = redfearn(lat,lon)
3848
3849            msg = 'Zone boundary crossed at longitude = %s' % lon
3850            #assert zone == refzone, msg
3851            #print '%7.2f %7.2f %8.2f %8.2f' %(lon, lat, easting, northing)
3852            x[i] = easting
3853            y[i] = northing
3854            i += 1
3855
3856
3857    #Construct 2 triangles per 'rectangular' element
3858    volumes = []
3859    for l in range(number_of_longitudes-1):    #X direction
3860        for k in range(number_of_latitudes-1): #Y direction
3861            v1 = vertices[l,k+1]
3862            v2 = vertices[l,k]
3863            v3 = vertices[l+1,k+1]
3864            v4 = vertices[l+1,k]
3865
3866            #Note, this is different to the ferret2sww code
3867            #since the order of the lats is reversed.
3868            volumes.append([v1,v3,v2]) #Upper element
3869            volumes.append([v4,v2,v3]) #Lower element
3870
3871    volumes = array(volumes)
3872
3873    geo_ref = Geo_reference(refzone,min(x),min(y))
3874    geo_ref.write_NetCDF(outfile)
3875
3876    # This will put the geo ref in the middle
3877    #geo_ref = Geo_reference(refzone,(max(x)+min(x))/2.0,(max(x)+min(y))/2.)
3878
3879
3880    if verbose:
3881        print '------------------------------------------------'
3882        print 'More Statistics:'
3883        print '  Extent (x/y):'
3884        print '    x in [%f, %f], len(x) == %d'\
3885              %(min(x), max(x),
3886                len(x))
3887        print '    y in [%f, %f], len(y) == %d'\
3888              %(min(y), max(y),
3889                len(y))
3890        print 'geo_ref: ',geo_ref
3891
3892    z = resize(bath_grid,outfile.variables['z'][:].shape)
3893    outfile.variables['x'][:] = x - geo_ref.get_xllcorner()
3894    outfile.variables['y'][:] = y - geo_ref.get_yllcorner()
3895    outfile.variables['z'][:] = z # FIXME (Ole): Remove once viewer has been recompiled and changed to use elevation instead of z
3896    outfile.variables['elevation'][:] = z 
3897    outfile.variables['volumes'][:] = volumes.astype(Int32) # On Opteron 64
3898
3899    stage = outfile.variables['stage']
3900    xmomentum = outfile.variables['xmomentum']
3901    ymomentum = outfile.variables['ymomentum']
3902
3903    outfile.variables['time'][:] = times   #Store time relative
3904
3905    if verbose: print 'Converting quantities'
3906    n = number_of_times
3907    for j in range(number_of_times):
3908        # load in files
3909        elevation_meta, elevation_grid = \
3910           _read_asc(elevation_dir + os.sep + elevation_files[j])
3911
3912        _, u_momentum_grid =  _read_asc(ucur_dir + os.sep + ucur_files[j])
3913        _, v_momentum_grid =  _read_asc(vcur_dir + os.sep + vcur_files[j])
3914
3915        #cut matrix to desired size
3916        elevation_grid = elevation_grid[kmin:kmax,lmin:lmax]
3917        u_momentum_grid = u_momentum_grid[kmin:kmax,lmin:lmax]
3918        v_momentum_grid = v_momentum_grid[kmin:kmax,lmin:lmax]
3919       
3920        # handle missing values
3921        missing = (elevation_grid == elevation_meta['NODATA_value'])
3922        if sometrue(missing):
3923            if fail_on_NaN:
3924                msg = 'File %s contains missing values'\
3925                      %(elevation_files[j])
3926                raise DataMissingValuesError, msg
3927            else:
3928                elevation_grid = elevation_grid*(missing==0) + \
3929                                 missing*elevation_NaN_filler
3930
3931
3932        if verbose and j%((n+10)/10)==0: print '  Doing %d of %d' %(j, n)
3933        i = 0
3934        for k in range(number_of_latitudes):      #Y direction
3935            for l in range(number_of_longitudes): #X direction
3936                w = zscale*elevation_grid[k,l] + mean_stage
3937                stage[j,i] = w
3938                h = w - z[i]
3939                xmomentum[j,i] = u_momentum_grid[k,l]*h
3940                ymomentum[j,i] = v_momentum_grid[k,l]*h
3941                i += 1
3942    outfile.close()
3943
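# Example (a sketch): building a boundary sww from CSIRO esri ascii grids.
# All directory and file names are hypothetical; each directory is expected
# to hold one file per timestep as described in the docstring above.
#
#     asc_csiro2sww('bathys', 'elevations', 'ucur', 'vcur', 'boundary.sww',
#                   minlat=-21.0, maxlat=-20.0, minlon=115.0, maxlon=116.5,
#                   mean_stage=0.0, verbose=True)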
3944def _get_min_max_indexes(latitudes_ref,longitudes_ref,
3945                        minlat=None, maxlat=None,
3946                        minlon=None, maxlon=None):
3947    """
3948    Return min, max indexes (for slicing) of the lat and long arrays to cover
3949    the area specified with min/max lat/long.
3950
3951    Think of the latitudes and longitudes describing a 2d surface.
3952    The area returned is, if possible, just big enough to cover the
3953    input max/min area. (This will not be possible if the max/min area
3954    has a section outside of the latitudes/longitudes area.)
3955
3956    Assert that longitudes are sorted,
3957    from low to high (west to east, e.g. 148 - 151),
3958    and that latitudes are sorted, ascending or descending.
3959    """
3960    latitudes = latitudes_ref[:]
3961    longitudes = longitudes_ref[:]
3962
3963    latitudes = ensure_numeric(latitudes)
3964    longitudes = ensure_numeric(longitudes)
3965
3966    assert allclose(sort(longitudes), longitudes)
3967
3968    #print latitudes[0],longitudes[0]
3969    #print len(latitudes),len(longitudes)
3970    #print latitudes[len(latitudes)-1],longitudes[len(longitudes)-1]
3971   
3972    lat_ascending = True
3973    if not allclose(sort(latitudes), latitudes):
3974        lat_ascending = False
3975        # reverse order of lat, so it's in ascending order         
3976        latitudes = latitudes[::-1]
3977        assert allclose(sort(latitudes), latitudes)
3978    #print "latitudes  in funct", latitudes
3979   
3980    largest_lat_index = len(latitudes)-1
3981    #Cut out a smaller extent.
3982    if minlat == None:
3983        lat_min_index = 0
3984    else:
3985        lat_min_index = searchsorted(latitudes, minlat)-1
3986        if lat_min_index <0:
3987            lat_min_index = 0
3988
3989
3990    if maxlat == None:
3991        lat_max_index = largest_lat_index #len(latitudes)
3992    else:
3993        lat_max_index = searchsorted(latitudes, maxlat)
3994        if lat_max_index > largest_lat_index:
3995            lat_max_index = largest_lat_index
3996
3997    if minlon == None:
3998        lon_min_index = 0
3999    else:
4000        lon_min_index = searchsorted(longitudes, minlon)-1
4001        if lon_min_index <0:
4002            lon_min_index = 0
4003
4004    if maxlon == None:
4005        lon_max_index = len(longitudes)
4006    else:
4007        lon_max_index = searchsorted(longitudes, maxlon)
4008
4009    # Reversing the indexes, if the lat array is descending
4010    if lat_ascending is False:
4011        lat_min_index, lat_max_index = largest_lat_index - lat_max_index , \
4012                                       largest_lat_index - lat_min_index
4013    lat_max_index = lat_max_index + 1 # taking into account how slicing works
4014    lon_max_index = lon_max_index + 1 # taking into account how slicing works
4015
4016    return lat_min_index, lat_max_index, lon_min_index, lon_max_index
4017
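# Worked example (not part of the original module): with
# latitudes = [-21, -20.5, -20, -19.5] and longitudes = [114, 114.5, 115],
# requesting minlat=-20.4, maxlat=-20.0, minlon=114.2, maxlon=115.0 gives
# slicing indexes that just cover the requested area:
#
#     kmin, kmax, lmin, lmax = _get_min_max_indexes(
#         [-21, -20.5, -20, -19.5], [114, 114.5, 115],
#         minlat=-20.4, maxlat=-20.0, minlon=114.2, maxlon=115.0)
#     # latitudes[kmin:kmax] == [-20.5, -20.0]; longitudes[lmin:lmax] is all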
4018
4019def _read_asc(filename, verbose=False):
4020    """Read esri file from the following ASCII format (.asc)
4021
4022    Example:
4023
4024    ncols         3121
4025    nrows         1800
4026    xllcorner     722000
4027    yllcorner     5893000
4028    cellsize      25
4029    NODATA_value  -9999
4030    138.3698 137.4194 136.5062 135.5558 ..........
4031
4032    """
4033
4034    datafile = open(filename)
4035
4036    if verbose: print 'Reading DEM from %s' %(filename)
4037    lines = datafile.readlines()
4038    datafile.close()
4039
4040    if verbose: print 'Got', len(lines), ' lines'
4041
4042    ncols = int(lines.pop(0).split()[1].strip())
4043    nrows = int(lines.pop(0).split()[1].strip())
4044    xllcorner = float(lines.pop(0).split()[1].strip())
4045    yllcorner = float(lines.pop(0).split()[1].strip())
4046    cellsize = float(lines.pop(0).split()[1].strip())
4047    NODATA_value = float(lines.pop(0).split()[1].strip())
4048
4049    assert len(lines) == nrows
4050
4051    #Store data
4052    grid = []
4053
4054    n = len(lines)
4055    for i, line in enumerate(lines):
4056        cells = line.split()
4057        assert len(cells) == ncols
4058        grid.append(array([float(x) for x in cells]))
4059    grid = array(grid)
4060
4061    return {'xllcorner':xllcorner,
4062            'yllcorner':yllcorner,
4063            'cellsize':cellsize,
4064            'NODATA_value':NODATA_value}, grid
4065
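# Example (a sketch): reading an esri ascii grid.  'dem.asc' is hypothetical.
#
#     metadata, grid = _read_asc('dem.asc')
#     # metadata holds xllcorner, yllcorner, cellsize and NODATA_value;
#     # grid is an (nrows, ncols) Numeric array, row 0 being the top row.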
4066
4067
4068    ####  URS 2 SWW  ###
4069
4070lon_name = 'LON'
4071lat_name = 'LAT'
4072time_name = 'TIME'
4073precision = Float # So if we want to change the precision it's done here
4074class Write_nc:
4075    """
4076    Write an nc file.
4077   
4078    Note, this should be checked to meet cdc netcdf conventions for gridded
4079    data. http://www.cdc.noaa.gov/cdc/conventions/cdc_netcdf_standard.shtml
4080   
4081    """
4082    def __init__(self,
4083                 quantity_name,
4084                 file_name,
4085                 time_step_count,
4086                 time_step,
4087                 lon,
4088                 lat):
4089        """
4090        time_step_count is the number of time steps.
4091        time_step is the time step size
4092       
4093        pre-condition: quantity_name must be 'HA', 'UA' or 'VA'.
4094        """
4095        self.quantity_name = quantity_name
4096        quantity_units = {'HA':'CENTIMETERS',
4097                              'UA':'CENTIMETERS/SECOND',
4098                              'VA':'CENTIMETERS/SECOND'}       
4099       
4100        multiplier_dic = {'HA':100.0, # To convert from m to cm
4101                              'UA':100.0,  #  m/s to cm/sec
4102                              'VA':-100.0}  # MUX files have positive x in the
4103        # Southern direction.  This corrects for it, when writing nc files.
4104       
4105        self.quantity_multiplier =  multiplier_dic[self.quantity_name]
4106       
4107        #self.file_name = file_name
4108        self.time_step_count = time_step_count
4109        self.time_step = time_step
4110
4111        # NetCDF file definition
4112        self.outfile = NetCDFFile(file_name, 'w')
4113        outfile = self.outfile       
4114
4115        #Create new file
4116        nc_lon_lat_header(outfile, lon, lat)
4117   
4118        # TIME
4119        outfile.createDimension(time_name, None)
4120        outfile.createVariable(time_name, precision, (time_name,))
4121
4122        #QUANTITY
4123        outfile.createVariable(self.quantity_name, precision,
4124                               (time_name, lat_name, lon_name))
4125        outfile.variables[self.quantity_name].missing_value=-1.e+034
4126        outfile.variables[self.quantity_name].units= \
4127                                 quantity_units[self.quantity_name]
4128        outfile.variables[lon_name][:]= ensure_numeric(lon)
4129        outfile.variables[lat_name][:]= ensure_numeric(lat)
4130
4131        #Assume no one will be wanting to read this, while we are writing
4132        #outfile.close()
4133       
4134    def store_timestep(self, quantity_slice):
4135        """
4136        Write a time slice of quantity info
4137        quantity_slice is the data to be stored at this time step
4138        """
4139       
4140        outfile = self.outfile
4141       
4142        # Get the variables
4143        time = outfile.variables[time_name]
4144        quantity = outfile.variables[self.quantity_name]
4145           
4146        i = len(time)
4147
4148        #Store time
4149        time[i] = i*self.time_step #self.domain.time
4150        quantity[i,:] = quantity_slice* self.quantity_multiplier
4151       
4152    def close(self):
4153        self.outfile.close()
4154
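# Example (a sketch): writing a small 'HA' (wave height) nc file with the
# class above.  Values are supplied in metres and scaled to centimetres by
# the quantity multiplier; names and data are hypothetical.
#
#     from Numeric import array, Float
#     lon = [150.0, 150.5]; lat = [-35.5, -35.0]
#     nc = Write_nc('HA', 'ha_test.nc', time_step_count=2, time_step=60.0,
#                   lon=lon, lat=lat)
#     for step in range(2):
#         nc.store_timestep(array([[0.1, 0.2], [0.3, 0.4]], Float))
#     nc.close()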
4155def urs2sww(basename_in='o', basename_out=None, verbose=False,
4156            remove_nc_files=True,
4157            minlat=None, maxlat=None,
4158            minlon= None, maxlon=None,
4159            mint=None, maxt=None,
4160            mean_stage=0,
4161            origin = None,
4162            zscale=1,
4163            fail_on_NaN=True,
4164            NaN_filler=0,
4165            elevation=None):
4166    """
4167    Convert URS C binary format for wave propagation to
4168    sww format native to abstract_2d_finite_volumes.
4169
4170    Specify only basename_in and read files of the form
4171    basefilename-z-mux2, basefilename-e-mux2 and
4172    basefilename-n-mux2 containing relative height,
4173    x-velocity and y-velocity, respectively.
4174
4175    Also convert latitude and longitude to UTM. All coordinates are
4176    assumed to be given in the GDA94 datum. The latitude and longitude
4177    information is for  a grid.
4178
4179    min's and max's: If omitted - the full extent is used.
4180    To include a value min may equal it, while max must exceed it.
4181    Lat and lon are assumed to be in decimal degrees.
4182    NOTE: minlon is the most easterly boundary.
4183
4184    origin is a 3-tuple with geo referenced
4185    UTM coordinates (zone, easting, northing).
4186    It will be the origin of the sww file. This shouldn't be used,
4187    since all of anuga should be able to handle an arbitrary origin.
4188
4189
4190    URS C binary format has data organised as TIME, LONGITUDE, LATITUDE,
4191    which means that latitude is the fastest
4192    varying dimension (row major order, so to speak).
4193
4194    In URS C binary the latitudes and longitudes are in ascending order.
4195    """
4196    if basename_out is None:
4197        basename_out = basename_in
4198    files_out = urs2nc(basename_in, basename_out)
4199    ferret2sww(basename_out,
4200               minlat=minlat,
4201               maxlat=maxlat,
4202               minlon=minlon,
4203               maxlon=maxlon,
4204               mint=mint,
4205               maxt=maxt,
4206               mean_stage=mean_stage,
4207               origin=origin,
4208               zscale=zscale,
4209               fail_on_NaN=fail_on_NaN,
4210               NaN_filler=NaN_filler,
4211               inverted_bathymetry=True,
4212               verbose=verbose)
4213    #print "files_out",files_out
4214    if remove_nc_files:
4215        for file_out in files_out:
4216            os.remove(file_out)
4217   
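# Example (a sketch): one-step conversion from URS binary files to sww.
# 'tsunami_source' is a hypothetical basename; the corresponding z, e and n
# mux files (see the docstring above) must exist.
#
#     urs2sww(basename_in='tsunami_source', basename_out='tsunami_boundary',
#             mean_stage=0.0, remove_nc_files=True, verbose=True)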
4218def urs2nc(basename_in = 'o', basename_out = 'urs'):
4219    """
4220    Convert the 3 urs files to 4 nc files.
4221
4222    The urs file names must be:
4223    [basename_in]-z-mux
4224    [basename_in]-e-mux
4225    [basename_in]-n-mux
4226   
4227    """
4228   
4229    files_in = [basename_in + WAVEHEIGHT_MUX_LABEL,
4230                basename_in + EAST_VELOCITY_LABEL,
4231                basename_in + NORTH_VELOCITY_LABEL]
4232    files_out = [basename_out+'_ha.nc',
4233                 basename_out+'_ua.nc',
4234                 basename_out+'_va.nc']
4235    quantities = ['HA','UA','VA']
4236
4237    #if os.access(files_in[0]+'.mux', os.F_OK) == 0 :
4238    for i, file_name in enumerate(files_in):
4239        if os.access(file_name, os.F_OK) == 0:
4240            if os.access(file_name+'.mux', os.F_OK) == 0 :
4241                msg = 'File %s does not exist or is not accessible' %file_name
4242                raise IOError, msg
4243            else:
4244                files_in[i] += '.mux'
4245                print "file_name", file_name
4246    hashed_elevation = None
4247    for file_in, file_out, quantity in map(None, files_in,
4248                                           files_out,
4249                                           quantities):
4250        lonlatdep, lon, lat, depth = _binary_c2nc(file_in,
4251                                         file_out,
4252                                         quantity)
4253        #print "lonlatdep", lonlatdep
4254        if hashed_elevation == None:
4255            elevation_file = basename_out+'_e.nc'
4256            write_elevation_nc(elevation_file,
4257                                lon,
4258                                lat,
4259                                depth)
4260            hashed_elevation = myhash(lonlatdep)
4261        else:
4262            msg = "The elevation information in the mux files is inconsistent"
4263            assert hashed_elevation == myhash(lonlatdep), msg
4264    files_out.append(elevation_file)
4265    return files_out
4266   
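# Example (a sketch): converting three hypothetical mux files 'o-z-mux',
# 'o-e-mux' and 'o-n-mux' into four nc files:
#
#     files = urs2nc(basename_in='o', basename_out='urs')
#     # -> ['urs_ha.nc', 'urs_ua.nc', 'urs_va.nc', 'urs_e.nc']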
4267def _binary_c2nc(file_in, file_out, quantity):
4268    """
4269    Reads in a quantity urs file and writes a quantity nc file.
4270    additionally, returns the depth and lat, long info,
4271    so it can be written to a file.
4272    """
4273    columns = 3 # long, lat , depth
4274    mux_file = open(file_in, 'rb')
4275   
4276    # Number of points/stations
4277    (points_num,)= unpack('i',mux_file.read(4))
4278
4279    # nt, int - Number of time steps
4280    (time_step_count,)= unpack('i',mux_file.read(4))
4281
4282    #dt, float - time step, seconds
4283    (time_step,) = unpack('f', mux_file.read(4))
4284   
4285    msg = "Bad data in the mux file."
4286    if points_num < 0:
4287        mux_file.close()
4288        raise ANUGAError, msg
4289    if time_step_count < 0:
4290        mux_file.close()
4291        raise ANUGAError, msg
4292    if time_step < 0:
4293        mux_file.close()
4294        raise ANUGAError, msg
4295   
4296    lonlatdep = p_array.array('f')
4297    lonlatdep.read(mux_file, columns * points_num)
4298    lonlatdep = array(lonlatdep, typecode=Float)   
4299    lonlatdep = reshape(lonlatdep, (points_num, columns))
4300   
4301    lon, lat, depth = lon_lat2grid(lonlatdep)
4302    lon_sorted = list(lon)
4303    lon_sorted.sort()
4304
4305    if not list(lon) == lon_sorted:
4306        msg = "Longitudes in mux file are not in ascending order"
4307        raise IOError, msg
4308    lat_sorted = list(lat)
4309    lat_sorted.sort()
4310
4311    if not list(lat) == lat_sorted:
4312        msg = "Latitudes in mux file are not in ascending order"
4313        raise IOError, msg
4314    nc_file = Write_nc(quantity,
4315                       file_out,
4316                       time_step_count,
4317                       time_step,
4318                       lon,
4319                       lat)
4320
4321    for i in range(time_step_count):
4322        #Read in a time slice  from mux file 
4323        hz_p_array = p_array.array('f')
4324        hz_p_array.read(mux_file, points_num)
4325        hz_p = array(hz_p_array, typecode=Float)
4326        hz_p = reshape(hz_p, (len(lon), len(lat)))
4327        hz_p = transpose(hz_p) #mux has lat varying fastest, nc has long v.f.
4328
4329        #write time slice to nc file
4330        nc_file.store_timestep(hz_p)
4331    mux_file.close()
4332    nc_file.close()
4333
4334    return lonlatdep, lon, lat, depth
4335   
4336
4337def write_elevation_nc(file_out, lon, lat, depth_vector):
4338    """
4339    Write an nc elevation file.
4340    """
4341   
4342    # NetCDF file definition
4343    outfile = NetCDFFile(file_out, 'w')
4344
4345    #Create new file
4346    nc_lon_lat_header(outfile, lon, lat)
4347   
4348    # ELEVATION
4349    zname = 'ELEVATION'
4350    outfile.createVariable(zname, precision, (lat_name, lon_name))
4351    outfile.variables[zname].units='CENTIMETERS'
4352    outfile.variables[zname].missing_value=-1.e+034
4353
4354    outfile.variables[lon_name][:]= ensure_numeric(lon)
4355    outfile.variables[lat_name][:]= ensure_numeric(lat)
4356
4357    depth = reshape(depth_vector, ( len(lat), len(lon)))
4358    outfile.variables[zname][:]= depth
4359   
4360    outfile.close()
4361   
4362def nc_lon_lat_header(outfile, lon, lat):
4363    """
4364    outfile is the netcdf file handle.
4365    lon - a list/array of the longitudes
4366    lat - a list/array of the latitudes
4367    """
4368   
4369    outfile.institution = 'Geoscience Australia'
4370    outfile.description = 'Converted from URS binary C'
4371   
4372    # Longitude
4373    outfile.createDimension(lon_name, len(lon))
4374    outfile.createVariable(lon_name, precision, (lon_name,))
4375    outfile.variables[lon_name].point_spacing='uneven'
4376    outfile.variables[lon_name].units='degrees_east'
4377    outfile.variables[lon_name].assignValue(lon)
4378
4379
4380    # Latitude
4381    outfile.createDimension(lat_name, len(lat))
4382    outfile.createVariable(lat_name, precision, (lat_name,))
4383    outfile.variables[lat_name].point_spacing='uneven'
4384    outfile.variables[lat_name].units='degrees_north'
4385    outfile.variables[lat_name].assignValue(lat)
4386
4387
4388   
4389def lon_lat2grid(long_lat_dep):
4390    """
4391    given a list of points that are assumed to be on a grid,
4392    return the long's and lat's of the grid.
4393    long_lat_dep is an array where each row is a position.
4394    The first column is longitudes.
4395    The second column is latitudes.
4396
4397    The latitude is the fastest varying dimension - in mux files
4398    """
4399    LONG = 0
4400    LAT = 1
4401    QUANTITY = 2
4402
4403    long_lat_dep = ensure_numeric(long_lat_dep, Float)
4404   
4405    num_points = long_lat_dep.shape[0]
4406    this_rows_long = long_lat_dep[0,LONG]
4407
4408    # Count the length of unique latitudes
4409    i = 0
4410    while i < num_points and long_lat_dep[i,LONG] == this_rows_long:
4411        i += 1
4412    # determine the lats and longs from the grid
4413    lat = long_lat_dep[:i, LAT]       
4414    long = long_lat_dep[::i, LONG]
4415   
4416    lenlong = len(long)
4417    lenlat = len(lat)
4418    #print 'len lat', lat, len(lat)
4419    #print 'len long', long, len(long)
4420         
4421    msg = 'Input data is not gridded'     
4422    assert num_points % lenlat == 0, msg
4423    assert num_points % lenlong == 0, msg
4424         
4425    # Test that data is gridded       
4426    for i in range(lenlong):
4427        msg = 'Data is not gridded.  It must be for this operation'
4428        first = i*lenlat
4429        last = first + lenlat
4430               
4431        assert allclose(long_lat_dep[first:last,LAT], lat), msg
4432        assert allclose(long_lat_dep[first:last,LONG], long[i]), msg
4433   
4434   
4435#    print 'range long', min(long), max(long)
4436#    print 'range lat', min(lat), max(lat)
4437#    print 'ref long', min(long_lat_dep[:,0]), max(long_lat_dep[:,0])
4438#    print 'ref lat', min(long_lat_dep[:,1]), max(long_lat_dep[:,1])
4439   
4440   
4441   
4442    msg = 'Out of range latitudes/longitudes'
4443    for l in lat: assert -90 < l < 90, msg
4444    for l in long: assert -180 < l < 180, msg
4445
4446    # Changing quantity from lat being the fastest varying dimension to
4447    # long being the fastest varying dimension
4448    # FIXME - make this faster/do this a better way
4449    # use numeric transpose, after reshaping the quantity vector
4450#    quantity = zeros(len(long_lat_dep), Float)
4451    quantity = zeros(num_points, Float)
4452   
4453#    print 'num',num_points
4454    for lat_i, _ in enumerate(lat):
4455        for long_i, _ in enumerate(long):
4456            q_index = lat_i*lenlong+long_i
4457            lld_index = long_i*lenlat+lat_i
4458#            print 'lat_i', lat_i, 'long_i',long_i, 'q_index', q_index, 'lld_index', lld_index
4459            temp = long_lat_dep[lld_index, QUANTITY]
4460            quantity[q_index] = temp
4461           
4462    return long, lat, quantity
4463
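# Worked example (not part of the original module): four points on a 2x2
# grid with latitude varying fastest, as in mux files:
#
#     lld = [[150.0, -35.0, 1.0],   # (long, lat, value)
#            [150.0, -34.0, 2.0],
#            [151.0, -35.0, 3.0],
#            [151.0, -34.0, 4.0]]
#     long, lat, quantity = lon_lat2grid(lld)
#     # long == [150.0, 151.0], lat == [-35.0, -34.0]; quantity is reordered
#     # to [1.0, 3.0, 2.0, 4.0], i.e. longitude now varies fastest.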
4464####  END URS 2 SWW  ###
4465
4466#### URS UNGRIDDED 2 SWW ###
4467
4468### PRODUCING THE POINTS NEEDED FILE ###
4469
4470# Ones used for FESA 2007 results
4471#LL_LAT = -50.0
4472#LL_LONG = 80.0
4473#GRID_SPACING = 1.0/60.0
4474#LAT_AMOUNT = 4800
4475#LONG_AMOUNT = 3600
4476
4477def URS_points_needed_to_file(file_name, boundary_polygon, zone,
4478                              ll_lat, ll_long,
4479                              grid_spacing, 
4480                              lat_amount, long_amount,
4481                              isSouthernHemisphere=True,
4482                              export_csv=False, use_cache=False,
4483                              verbose=False):
4484    """
4485    Given the info to replicate the URS grid and a polygon, output
4486    a file that specifies the cloud of boundary points for URS.
4487
4488    This creates a .urs file.  This is in the format used by URS;
4489    the 1st line is the number of points,
4490    each line after represents a point, in lats and longs.
4491
4492    Note: The polygon cannot cross zones or hemispheres.
4493
4494    A workaround for different zones or hemispheres is to run this twice,
4495    once for each zone, and then combine the output.
4496
4497    file_name - name of the urs file produced.
4498    boundary_polygon - a list of points that describes a polygon.
4499                      The last point is assumed to join the first point.
4500                      This is in UTM (lat long would be better though)
4501
4502    This is info about the URS model that needs to be input:
4503
4504    ll_lat - lower left latitude, in decimal degrees
4505    ll_long - lower left longitude, in decimal degrees
4506    grid_spacing - in decimal degrees
4507    lat_amount - number of latitudes
4508    long_amount - number of longs
4509
4510
4511    Don't add the file extension.  It will be added.
4512    """
4513    geo = URS_points_needed(boundary_polygon, zone, ll_lat, ll_long,
4514                            grid_spacing, 
4515                            lat_amount, long_amount, isSouthernHemisphere,
4516                            use_cache, verbose)
4517    if not file_name[-4:] == ".urs":
4518        file_name += ".urs"
4519    geo.export_points_file(file_name, isSouthHemisphere=isSouthernHemisphere)
4520    if export_csv:
4521        if file_name[-4:] == ".urs":
4522            file_name = file_name[:-4] + ".csv"
4523        geo.export_points_file(file_name)
4524    return geo
4525
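# Example (a sketch): writing the cloud of URS boundary points for a UTM
# polygon.  All values are hypothetical; the grid parameters must describe
# the actual URS grid being replicated (cf. the FESA 2007 values above).
#
#     polygon = [[480000, 7620000], [500000, 7620000], [500000, 7640000]]
#     URS_points_needed_to_file('urs_boundary', polygon, zone=50,
#                               ll_lat=-50.0, ll_long=80.0,
#                               grid_spacing=1.0/60.0,
#                               lat_amount=4800, long_amount=3600)
#     # writes 'urs_boundary.urs'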
4526def URS_points_needed(boundary_polygon, zone, ll_lat,
4527                      ll_long, grid_spacing, 
4528                      lat_amount, long_amount, isSouthHemisphere=True,
4529                      use_cache=False, verbose=False):
4530    args = (boundary_polygon,
4531            zone, ll_lat,
4532            ll_long, grid_spacing, 
4533            lat_amount, long_amount, isSouthHemisphere)
4534    kwargs = {} 
4535    if use_cache is True:
4536        try:
4537            from anuga.caching import cache
4538        except:
4539            msg = 'Caching was requested, but caching module '+\
4540                  'could not be imported'
4541            raise ImportError, msg
4542
4543
4544        geo = cache(_URS_points_needed,
4545                  args, kwargs,
4546                  verbose=verbose,
4547                  compression=False)
4548    else:
4549        geo = apply(_URS_points_needed, args, kwargs)
4550
4551    return geo
4552
4553def _URS_points_needed(boundary_polygon,
4554                      zone, ll_lat,
4555                      ll_long, grid_spacing, 
4556                      lat_amount, long_amount,
4557                       isSouthHemisphere):
4558    """
4559    boundary_polygon - a list of points that describes a polygon.
4560                      The last point is assumed to join the first point.
4561                      This is in UTM (lat long would be better though)
4562
4563    ll_lat - lower left latitude, in decimal degrees
4564    ll_long - lower left longitude, in decimal degrees
4565    grid_spacing - in decimal degrees
4566
4567    """
4568   
4569    from sets import ImmutableSet
4570   
4571    msg = "grid_spacing cannot be zero"
4572    assert not grid_spacing == 0, msg
4573    a = boundary_polygon
4574    # List of segments.  Each segment is two points.
4575    segs = [i and [a[i-1], a[i]] or [a[len(a)-1], a[0]] for i in range(len(a))]
4576    # convert the segs to Lat's and longs.
4577   
4578    # Don't assume the zone of the segments is the same as the lower left
4579    # corner of the lat long data!!  They can easily be in different zones
4580   
4581    lat_long_set = ImmutableSet()
4582    for seg in segs:
4583        points_lat_long = points_needed(seg, ll_lat, ll_long, grid_spacing, 
4584                      lat_amount, long_amount, zone, isSouthHemisphere)
4585        lat_long_set |= ImmutableSet(points_lat_long)
4586    if lat_long_set == ImmutableSet([]):
4587        msg = """The specified URS region and the polygon do not overlap."""
4588        raise ValueError, msg
4589
4590    # Warning there is no info in geospatial saying the hemisphere of
4591    # these points.  There should be.
4592    geo = Geospatial_data(data_points=list(lat_long_set),
4593                              points_are_lats_longs=True)
4594    return geo
4595   
4596def points_needed(seg, ll_lat, ll_long, grid_spacing, 
4597                  lat_amount, long_amount, zone,
4598                  isSouthHemisphere):
4599    """
4600    seg is two points, in UTM
4601    return a list of the points, in lats and longs that are needed to
4602    interpolate any point on the segment.
4603    """
4604    from math import sqrt
4605    #print "zone",zone
4606    geo_reference = Geo_reference(zone=zone)
4607    #print "seg", seg
4608    geo = Geospatial_data(seg,geo_reference=geo_reference)
4609    seg_lat_long = geo.get_data_points(as_lat_long=True,
4610                                       isSouthHemisphere=isSouthHemisphere)
4611    #print "seg_lat_long", seg_lat_long
4612    # 1.415 = 2^0.5, rounded up....
4613    sqrt_2_rounded_up = 1.415
4614    buffer = sqrt_2_rounded_up * grid_spacing
4615   
4616    max_lat = max(seg_lat_long[0][0], seg_lat_long[1][0]) + buffer
4617    max_long = max(seg_lat_long[0][1], seg_lat_long[1][1]) + buffer
4618    min_lat = min(seg_lat_long[0][0], seg_lat_long[1][0]) - buffer
4619    min_long = min(seg_lat_long[0][1], seg_lat_long[1][1]) - buffer
4620
4621    #print "min_long", min_long
4622    #print "ll_long", ll_long
4623    #print "grid_spacing", grid_spacing
4624    first_row = (min_long - ll_long)/grid_spacing
4625    # To round up
4626    first_row_long = int(round(first_row + 0.5))
4627    #print "first_row", first_row_long
4628
4629    last_row = (max_long - ll_long)/grid_spacing # round down
4630    last_row_long = int(round(last_row))
4631    #print "last_row", last_row_long
4632   
4633    first_row = (min_lat - ll_lat)/grid_spacing
4634    # To round up
4635    first_row_lat = int(round(first_row + 0.5))
4636    #print "first_row", first_row_lat
4637
4638    last_row = (max_lat - ll_lat)/grid_spacing # round down
4639    last_row_lat = int(round(last_row))
4640    #print "last_row",last_row_lat
4641
4642    # To work out the max distance -
4643    # 111120 m is the distance spanned by 1 degree of latitude.
4644    #max_distance = sqrt_2_rounded_up * 111120 * grid_spacing
4645    max_distance = 157147.4112 * grid_spacing  # sqrt(2) * 111120 * grid_spacing
4646    #print "max_distance", max_distance #2619.12 m for 1 minute
4647    points_lat_long = []
4648    # Create a list of the lat long points to include.
4649    for index_lat in range(first_row_lat, last_row_lat + 1):
4650        for index_long in range(first_row_long, last_row_long + 1):
4651            #print "index_lat", index_lat
4652            #print "index_long", index_long
4653            lat = ll_lat + index_lat*grid_spacing
4654            long = ll_long + index_long*grid_spacing
4655
4656            #filter here to keep good points
4657            if keep_point(lat, long, seg, max_distance):
4658                points_lat_long.append((lat, long)) #must be hashable
4659    #print "points_lat_long", points_lat_long
4660
4661    # Now that we have these points, lets throw ones out that are too far away
4662    return points_lat_long
4663
4664def keep_point(lat, long, seg, max_distance):
4665    """
4666    seg is two points, UTM
4667    """
4668    from math import sqrt
4669    _ , x0, y0 = redfearn(lat, long)
4670    x1 = seg[0][0]
4671    y1 = seg[0][1]
4672    x2 = seg[1][0]
4673    y2 = seg[1][1]
4674    x2_1 = x2-x1
4675    y2_1 = y2-y1
4676    try:
4677        d = abs((x2_1)*(y1-y0)-(x1-x0)*(y2_1))/sqrt( \
4678            (x2_1)*(x2_1)+(y2_1)*(y2_1))
4679    except ZeroDivisionError:
4680        #print "seg", seg
4681        #print "x0", x0
4682        #print "y0", y0
4683        if sqrt((x2_1)*(x2_1)+(y2_1)*(y2_1)) == 0 and \
4684           abs((x2_1)*(y1-y0)-(x1-x0)*(y2_1)) == 0:
4685            return True
4686        else:
4687            return False
4688   
4689    if d <= max_distance:
4690        return True
4691    else:
4692        return False
4693   
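# Worked example (not part of the original module): keep_point uses the
# standard point-to-line distance
#
#     d = |(x2-x1)*(y1-y0) - (x1-x0)*(y2-y1)| / sqrt((x2-x1)**2 + (y2-y1)**2)
#
# so for seg = [[0, 0], [100, 0]] and a point whose UTM coordinates come out
# as (x0, y0) = (50, 30), d == 30 and the point is kept iff max_distance >= 30.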
4694    #### CONVERTING UNGRIDDED URS DATA TO AN SWW FILE ####
4695   
4696WAVEHEIGHT_MUX_LABEL = '-z-mux'
4697EAST_VELOCITY_LABEL =  '-e-mux'
4698NORTH_VELOCITY_LABEL =  '-n-mux' 
4699def urs_ungridded2sww(basename_in='o', basename_out=None, verbose=False,
4700                      mint=None, maxt=None,
4701                      mean_stage=0,
4702                      origin=None,
4703                      hole_points_UTM=None,
4704                      zscale=1):
4705    """   
4706    Convert URS C binary format for wave propagation to
4707    sww format native to abstract_2d_finite_volumes.
4708
4709
4710    Specify only basename_in and read files of the form
4711    basefilename-z-mux, basefilename-e-mux and
4712    basefilename-n-mux containing relative height,
4713    x-velocity and y-velocity, respectively.
4714
    Also convert latitude and longitude to UTM. All coordinates are
    assumed to be given in the GDA94 datum. The latitude and longitude
    information is assumed to be ungridded.

    min's and max's: If omitted, the full extent is used.
    To include a value, min and max may equal it.
    Lat and long are assumed to be in decimal degrees.
4722   
4723    origin is a 3-tuple with geo referenced
4724    UTM coordinates (zone, easting, northing)
4725    It will be the origin of the sww file. This shouldn't be used,
    since all of anuga should be able to handle an arbitrary origin.
4727    The mux point info is NOT relative to this origin.
4728
4729
    URS C binary format has data organised as TIME, LONGITUDE, LATITUDE,
    which means that latitude is the fastest
    varying dimension (row major order, so to speak).

    In URS C binary the latitudes and longitudes are in ascending order.
4735
4736    Note, interpolations of the resulting sww file will be different
4737    from results of urs2sww.  This is due to the interpolation
4738    function used, and the different grid structure between urs2sww
4739    and this function.
4740   
4741    Interpolating data that has an underlying gridded source can
4742    easily end up with different values, depending on the underlying
4743    mesh.
4744
4745    consider these 4 points
4746    50  -50
4747
4748    0     0
4749
4750    The grid can be
4751     -
4752    |\|    A
4753     -
4754     or;
4755      -
4756     |/|   B
4757      -
      If a point is just below the center of the square, it will have a
      +ve value in grid A and a -ve value in grid B.
4760    """ 
4761    from anuga.mesh_engine.mesh_engine import NoTrianglesError
4762    from anuga.pmesh.mesh import Mesh
4763
4764    files_in = [basename_in + WAVEHEIGHT_MUX_LABEL,
4765                basename_in + EAST_VELOCITY_LABEL,
4766                basename_in + NORTH_VELOCITY_LABEL]
4767    quantities = ['HA','UA','VA']
4768
    # Instantiate Urs_points for each of the three mux files.
4770    mux = {}
4771    for quantity, file in map(None, quantities, files_in):
4772        mux[quantity] = Urs_points(file)
4773       
4774    # Could check that the depth is the same. (hashing)
4775
4776    # handle to a mux file to do depth stuff
4777    a_mux = mux[quantities[0]]
4778   
4779    # Convert to utm
4780    lat = a_mux.lonlatdep[:,1]
4781    long = a_mux.lonlatdep[:,0]
4782    points_utm, zone = convert_from_latlon_to_utm( \
4783        latitudes=lat, longitudes=long)
4784    #print "points_utm", points_utm
4785    #print "zone", zone
4786
    elevation = a_mux.lonlatdep[:,2] * -1  # depth (positive down) -> elevation
4788   
4789    # grid ( create a mesh from the selected points)
    # This mesh has a problem.  Triangles are stretched over ungridded areas.
    # If these areas could be described as holes in pmesh, that would be great.

    # I can't just get the user to select a point in the middle.
4794    # A boundary is needed around these points.
4795    # But if the zone of points is obvious enough auto-segment should do
4796    # a good boundary.
4797    mesh = Mesh()
4798    mesh.add_vertices(points_utm)
4799    mesh.auto_segment(smooth_indents=True, expand_pinch=True)
4800    # To try and avoid alpha shape 'hugging' too much
4801    mesh.auto_segment( mesh.shape.get_alpha()*1.1 )
4802    if hole_points_UTM is not None:
4803        point = ensure_absolute(hole_points_UTM)
4804        mesh.add_hole(point[0], point[1])
4805    try:
4806        mesh.generate_mesh(minimum_triangle_angle=0.0, verbose=False)
4807    except NoTrianglesError:
4808        # This is a bit of a hack, going in and changing the
4809        # data structure.
4810        mesh.holes = []
4811        mesh.generate_mesh(minimum_triangle_angle=0.0, verbose=False)
4812    mesh_dic = mesh.Mesh2MeshList()
4813
4814    #mesh.export_mesh_file(basename_in + '_168.tsh')
4815    #import sys; sys.exit()
4816    # These are the times of the mux file
4817    mux_times = []
4818    for i in range(a_mux.time_step_count):
4819        mux_times.append(a_mux.time_step * i) 
4820    mux_times_start_i, mux_times_fin_i = mux2sww_time(mux_times, mint, maxt)
4821    times = mux_times[mux_times_start_i:mux_times_fin_i]
4822   
4823    if mux_times_start_i == mux_times_fin_i:
4824        # Close the mux files
4825        for quantity, file in map(None, quantities, files_in):
4826            mux[quantity].close()
4827        msg="Due to mint and maxt there's no time info in the boundary SWW."
4828        raise Exception, msg
4829       
    # If this raise is removed there are currently no downstream errors
4831           
4832    points_utm=ensure_numeric(points_utm)
    assert allclose(ensure_numeric(mesh_dic['generatedpointlist']),
                    ensure_numeric(points_utm))
4835   
4836    volumes = mesh_dic['generatedtrianglelist']
4837   
4838    # write sww intro and grid stuff.   
4839    if basename_out is None:
4840        swwname = basename_in + '.sww'
4841    else:
4842        swwname = basename_out + '.sww'
4843
4844    if verbose: print 'Output to ', swwname
4845    outfile = NetCDFFile(swwname, 'w')
4846    # For a different way of doing this, check out tsh2sww
4847    # work out sww_times and the index range this covers
4848    sww = Write_sww()
4849    sww.store_header(outfile, times, len(volumes), len(points_utm),
4850                     verbose=verbose,sww_precision=Float)
4851    outfile.mean_stage = mean_stage
4852    outfile.zscale = zscale
4853
4854    sww.store_triangulation(outfile, points_utm, volumes,
4855                            elevation, zone,  new_origin=origin,
4856                            verbose=verbose)
4857   
4858    if verbose: print 'Converting quantities'
4859    j = 0
4860    # Read in a time slice from each mux file and write it to the sww file
4861    for ha, ua, va in map(None, mux['HA'], mux['UA'], mux['VA']):
4862        if j >= mux_times_start_i and j < mux_times_fin_i:
4863            stage = zscale*ha + mean_stage
4864            h = stage - elevation
4865            xmomentum = ua*h
4866            ymomentum = -1*va*h # -1 since in mux files south is positive.
4867            sww.store_quantities(outfile, 
4868                                 slice_index=j - mux_times_start_i,
4869                                 verbose=verbose,
4870                                 stage=stage,
4871                                 xmomentum=xmomentum,
4872                                 ymomentum=ymomentum,
4873                                 sww_precision=Float)
4874        j += 1
4875    if verbose: sww.verbose_quantities(outfile)
4876    outfile.close()
4877    #
4878    # Do some conversions while writing the sww file
4879
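# A minimal sketch (not part of the API) of a typical call. The basename is
# hypothetical; it expands to tsunami_source-z-mux, -e-mux and -n-mux, and
# the output is written to tsunami_source.sww.
def _example_urs_ungridded2sww():
    urs_ungridded2sww(basename_in='tsunami_source',
                      mint=0.0, maxt=3600.0,  # keep only the first hour
                      mean_stage=0.5,
                      verbose=True)
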
4880    ##################################
4881    # READ MUX2 FILES line of points #
4882    ##################################
4883
4884WAVEHEIGHT_MUX2_LABEL = '-z-mux2'
4885EAST_VELOCITY_MUX2_LABEL =  '-e-mux2'
4886NORTH_VELOCITY_MUX2_LABEL =  '-n-mux2'   
4887
4888def read_mux2_py(filenames,
4889                 weights,
4890                 permutation=None,
4891                 verbose=False):
    """Access the mux files specified in the filenames list. Combine the
       data found therein as a weighted linear sum as specified by the weights.
       If permutation is None or empty, extract timeseries data for all gauges
       within the files.

       Input:
           filenames:   List of filenames specifying the files containing the
                        timeseries data (mux2 format) for each source
           weights:     Weights associated with each source
           permutation: Specifies the gauge numbers for which data is to be
                        extracted
    """
4904
4905    from Numeric import ones,Float,compress,zeros,arange
4906    from urs_ext import read_mux2
4907
4908    numSrc=len(filenames)
4909   
4910    file_params=-1*ones(3,Float) #[nsta,dt,nt]
4911   
4912    # Convert verbose to int C flag
4913    if verbose:
4914        verbose=1
4915    else:
4916        verbose=0
4917       
4918    if permutation is None:
4919        permutation = ensure_numeric([], Float)   
4920
4921    #print 'filenames', filenames
4922    #print 'weights', weights
4923               
4924    # Call underlying C implementation urs2sts_ext.c   
4925    data = read_mux2(numSrc, filenames, weights, file_params, permutation, verbose)
4926
    msg='File parameter values were not read in correctly from the C file'
    assert len(compress(file_params>0,file_params))!=0, msg

    msg='The number of stations specified in the C array and in the file are inconsistent'
    assert file_params[0] >= len(permutation), msg
4932   
4933    msg='The number of stations returned is inconsistent with the requested number'   
4934    assert len(permutation) == 0 or len(permutation) == data.shape[0], msg
4935   
4936    nsta=int(file_params[0])
4937    msg='Must have at least one station'
4938    assert nsta>0,msg
4939   
4940    dt=file_params[1]
    msg='Must have a positive timestep'
4942    assert dt>0,msg
4943   
4944    nt=int(file_params[2])
4945    msg='Must have at least one gauge value'
4946    assert nt>0,msg
4947   
4948    OFFSET=5 # Number of site parameters p passed back with data
4949             # p=[geolat,geolon,depth,start_tstep,finish_tstep]
4950     
4951    # FIXME (Ole): What is the relationship with params and data.shape ?
4952    # It looks as if the following asserts should pass but they don't always
4953    #
4954             
4955    #print
4956    #print nsta, nt, data.shape
4957       
4958    #msg = 'nt = %d, data.shape[1] == %d' %(nt, data.shape[1])
4959    #assert nt == data.shape[1] - OFFSET, msg
4960   
4961    #msg = 'nsta = %d, data.shape[0] == %d' %(nsta, data.shape[0])   
4962    #assert nsta == data.shape[0], msg
4963
4964   
4965    # Number of stations in ordering file
4966    number_of_selected_stations = data.shape[0]
4967
4968    # Index where data ends and parameters begin
4969    parameters_index = data.shape[1]-OFFSET         
4970             
4971    times=dt*arange(parameters_index)   
4972    latitudes=zeros(number_of_selected_stations, Float)
4973    longitudes=zeros(number_of_selected_stations, Float)
4974    elevation=zeros(number_of_selected_stations, Float)
4975    quantity=zeros((number_of_selected_stations, parameters_index), Float)
4976   
4977
4978    #print 'number_of_selected_stations', number_of_selected_stations
4979    starttime=1e16
4980    for i in range(number_of_selected_stations):
4981        quantity[i][:]=data[i][:parameters_index]
4982       
4983        #print i, parameters_index
4984        #print quantity[i][:]
4985   
4986        latitudes[i]=data[i][parameters_index]
4987        longitudes[i]=data[i][parameters_index+1]
4988        elevation[i]=-data[i][parameters_index+2]
4989        first_time_step = data[i][parameters_index+3]
4990       
4991        starttime=min(dt*first_time_step, starttime)
4992       
4993    return times, latitudes, longitudes, elevation, quantity, starttime
4994   
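# A minimal sketch (not part of the API): combine two hypothetical mux2
# sources with 70/30 weights and extract gauges 0 and 3 only.
def _example_read_mux2_py():
    from Numeric import array, Float
    times, lat, lon, elev, quantity, t0 = read_mux2_py(
        ['sourceA-z-mux2', 'sourceB-z-mux2'],  # hypothetical filenames
        array([0.7, 0.3], Float),              # one weight per source
        permutation=ensure_numeric([0, 3]))    # gauge indices to keep
    print 'start time:', t0, 'gauges:', quantity.shape[0]
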
4995
4996def mux2sww_time(mux_times, mint, maxt):
    """Return start and end indices into mux_times, bounded by mint and maxt.
    """

    if mint is None:
        mux_times_start_i = 0
    else:
        mux_times_start_i = searchsorted(mux_times, mint)

    if maxt is None:
5006        mux_times_fin_i = len(mux_times)
5007    else:
5008        maxt += 0.5 # so if you specify a time where there is
5009                    # data that time will be included
5010        mux_times_fin_i = searchsorted(mux_times, maxt)
5011
5012    return mux_times_start_i, mux_times_fin_i
5013
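# A minimal sketch (not part of the API): select the slice of a ten-step,
# 60 s series falling between t=120 and t=300 inclusive.
def _example_mux2sww_time():
    mux_times = [60.0*i for i in range(10)]            # 0, 60, ..., 540
    start_i, fin_i = mux2sww_time(mux_times, 120.0, 300.0)
    print mux_times[start_i:fin_i]                     # [120.0, ..., 300.0]
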
5014
5015def urs2sts(basename_in, basename_out=None, 
5016            weights=None,
5017            verbose=False, 
5018            origin=None,
5019            zone=None,
5020            mean_stage=0.0, 
5021            zscale=1.0,
5022            ordering_filename=None):
5023    """Convert URS mux2 format for wave propagation to sts format
5024
5025    Also convert latitude and longitude to UTM. All coordinates are
5026    assumed to be given in the GDA94 datum
5027
5028    origin is a 3-tuple with geo referenced
5029    UTM coordinates (zone, easting, northing)
5030   
5031    inputs:
5032       
5033    basename_in: list of source file prefixes
5034   
5035        These are combined with the extensions:
5036        WAVEHEIGHT_MUX2_LABEL = '-z-mux2' for stage
5037        EAST_VELOCITY_MUX2_LABEL =  '-e-mux2' xmomentum
5038        NORTH_VELOCITY_MUX2_LABEL =  '-n-mux2' and ymomentum
5039   
        to create a 2D list of mux2 files. The rows are associated with each
        quantity and must have the above extensions;
        the columns are the list of file prefixes.
5043   
    ordering_filename: a .txt file name specifying which mux2 gauge points are
              to be stored. This is indicated by the index of the gauge
              in the ordering file.

              ordering file format:
              1st line:    'index,longitude,latitude\n'
              other lines: index,longitude,latitude

              If ordering_filename is None or the ordering file is empty
              then all points are taken in the order they
              appear in the mux2 file.
5055   
5056   
5057    output:
5058      basename_out: name of sts file in which mux2 data is stored.
5059   
5060   
5061    """
5062    import os
5063    from Scientific.IO.NetCDF import NetCDFFile
5064    from Numeric import Float, Int, Int32, searchsorted, zeros, array
5065    from Numeric import allclose, around,ones,Float
5066    from types import ListType,StringType
5067    from operator import __and__
5068   
5069    if not isinstance(basename_in, ListType):
5070        if verbose: print 'Reading single source'
5071        basename_in=[basename_in]
5072
5073    # This is the value used in the mux file format to indicate NAN data
5074    # FIXME (Ole): This should be changed everywhere to IEEE NAN when we upgrade to Numpy
5075    NODATA = 99   
5076
5077    # Check that basename is a list of strings
5078    if not reduce(__and__, map(lambda z:isinstance(z,StringType), basename_in)):
5079        msg= 'basename_in must be a string or list of strings'
5080        raise Exception, msg
5081
5082    # Find the number of sources to be used
5083    numSrc=len(basename_in)
5084
5085    # A weight must be specified for each source
5086    if weights is None:
5087        # Default is equal weighting
5088        weights=ones(numSrc,Float)/numSrc
5089    else:
5090        weights = ensure_numeric(weights)
        msg = 'When combining multiple sources must specify a weight for '+\
              'each mux2 source file'
5093        assert len(weights) == numSrc, msg
5094       
5095    if verbose:
5096        print 'Weights used in urs2sts:', weights
5097
5098    # Check output filename   
5099    if basename_out is None:
5100        msg = 'STS filename must be specified as basename_out'
5101        msg += ' in function urs2sts'
5102        raise Exception, msg
5103   
5104    if basename_out.endswith('.sts'):
5105        stsname = basename_out
5106    else:   
5107        stsname = basename_out + '.sts'       
5108
5109    # Create input filenames from basenames and check their existence
5110    files_in=[[],[],[]]
5111    for files in basename_in:
        files_in[0].append(files + WAVEHEIGHT_MUX2_LABEL)
5113        files_in[1].append(files + EAST_VELOCITY_MUX2_LABEL)
5114        files_in[2].append(files + NORTH_VELOCITY_MUX2_LABEL)
5115   
5116    quantities = ['HA','UA','VA'] # Quantity names used in the MUX2 format
5117    for i in range(len(quantities)): 
5118        for file_in in files_in[i]:
5119            if (os.access(file_in, os.F_OK) == 0):
5120                msg = 'File %s does not exist or is not accessible' %file_in
5121                raise IOError, msg
5122
5123    # Establish permutation array
5124    if ordering_filename is not None:
5125
5126        if verbose is True:
5127            print 'Reading ordering file', ordering_filename
5128        # Read ordering file
5129        try:
5130            fid=open(ordering_filename, 'r')
5131            file_header=fid.readline().split(',')
5132            ordering_lines=fid.readlines()
5133            fid.close()
5134        except:
5135            msg = 'Cannot open %s'%ordering_filename
5136            raise Exception, msg
5137
5138        reference_header = 'index, longitude, latitude\n'
5139        reference_header_split = reference_header.split(',')
5140        for i in range(3):
5141            if not file_header[i].strip() == reference_header_split[i].strip():
5142                msg = 'File must contain header: '+reference_header
5143                raise Exception, msg
5144
5145        if len(ordering_lines)<2:
5146            msg = 'File must contain at least two points'
5147            raise Exception, msg
5148
5149        permutation = ensure_numeric([int(line.split(',')[0]) for line in ordering_lines])
5150    else:
5151        permutation = None
5152
5153
5154    #print 'permutation', permutation
5155    # Read MUX2 files
5156    if (verbose): print 'reading mux2 file'
5157    mux={}
5158    for i, quantity in enumerate(quantities):
5159   
5160        #print
5161        #print quantity
5162   
5163        # For each quantity read the associated list of source mux2 file with
5164        # extention associated with that quantity
5165        times,\
5166        latitudes,\
5167        longitudes,\
5168        elevation,\
5169        mux[quantity],\
5170        starttime = read_mux2_py(files_in[i], weights, permutation, verbose=verbose)
5171   
5172        # Check that all quantities have consistent time and space information     
5173        if quantity!=quantities[0]:
5174            msg='%s, %s and %s have inconsistent gauge data'\
5175                %(files_in[0], files_in[1], files_in[2])
5176            assert allclose(times, times_old), msg
5177            assert allclose(latitudes, latitudes_old), msg
5178            assert allclose(longitudes, longitudes_old), msg
5179            assert allclose(elevation, elevation_old), msg
5180            assert allclose(starttime, starttime_old), msg
5181        times_old=times
5182        latitudes_old=latitudes
5183        longitudes_old=longitudes
5184        elevation_old=elevation
5185        starttime_old=starttime
5186
5187
5188       
5189       
5190       
5191        # Self check - can be removed to improve speed
5192
5193        #ref_longitudes = [float(line.split(',')[1]) for line in ordering_lines]               
5194        #ref_latitudes = [float(line.split(',')[2]) for line in ordering_lines]       
5195        #
5196        #msg = 'Longitudes specified in ordering file do not match those found in mux files'
5197        #msg += 'I got %s instead of %s (only beginning shown)'\
5198        #    %(str(longitudes[:10]) + '...',
5199        #      str(ref_longitudes[:10]) + '...')       
5200        #assert allclose(longitudes, ref_longitudes), msg       
5201        #
5202        #msg = 'Latitudes specified in ordering file do not match those found in mux files. '
5203        #msg += 'I got %s instead of %s (only beginning shown)'\
5204        #    %(str(latitudes[:10]) + '...',
5205        #      str(ref_latitudes[:10]) + '...')               
5206        #assert allclose(latitudes, ref_latitudes), msg
5207       
5208               
5209       
5210    # Store timeseries in NetCDF STS file   
    msg='File is empty and/or clipped region not in file region'
    assert len(latitudes) > 0, msg
5213
5214    number_of_points = latitudes.shape[0]      # Number of stations retrieved
5215    number_of_times = times.shape[0]           # Number of timesteps
5216    number_of_latitudes = latitudes.shape[0]   # Number latitudes
5217    number_of_longitudes = longitudes.shape[0] # Number longitudes
5218
5219   
    # The permutation vector contains the original indices
    # as given in the ordering file, or None, in which case points
    # are assigned the trivial indices enumerating them from
    # 0 to number_of_points-1
5224    if permutation is None:
5225        permutation = arange(number_of_points, typecode=Int)
5226   
5227   
5228    # NetCDF file definition
5229    outfile = NetCDFFile(stsname, 'w')
5230
5231    description = 'Converted from URS mux2 files: %s'\
5232                  %(basename_in)
5233   
5234    # Create new file
5235    sts = Write_sts()
5236
5237    sts.store_header(outfile, 
5238                     times+starttime,
5239                     number_of_points, 
5240                     description=description,
5241                     verbose=verbose,
5242                     sts_precision=Float)
5243   
5244    # Store
5245    from anuga.coordinate_transforms.redfearn import redfearn
5246    x = zeros(number_of_points, Float)  # Easting
5247    y = zeros(number_of_points, Float)  # Northing
5248
5249    # Check zone boundaries
5250    if zone is None:
5251        refzone, _, _ = redfearn(latitudes[0],longitudes[0])
5252    else:
5253        refzone = zone
5254
5255    old_zone = refzone
5256   
5257    for i in range(number_of_points):
5258        zone, easting, northing = redfearn(latitudes[i],longitudes[i], zone=zone)
5259        x[i] = easting
5260        y[i] = northing
5261        if zone != refzone:
            msg='All sts gauges need to be in the same zone. \n'
            msg+='offending gauge: Zone %d, %.4f, %.4f\n' %(zone, easting, northing)
            msg+='previous gauge: Zone %d, %.4f, %.4f' %(old_zone, old_easting, old_northing)
5265            raise Exception, msg
5266        old_zone = zone
5267        old_easting = easting
5268        old_northing = northing
5269
5270    if origin is None:
5271        origin = Geo_reference(refzone,min(x),min(y))
5272    geo_ref = write_NetCDF_georeference(origin, outfile)
5273
5274
5275   
5276    #print geo_ref.get_xllcorner()
5277    #print geo_ref.get_yllcorner()
5278
5279    elevation = resize(elevation,outfile.variables['elevation'][:].shape)
5280    outfile.variables['permutation'][:] = permutation.astype(Int32) # On Opteron 64
5281    outfile.variables['x'][:] = x - geo_ref.get_xllcorner()
5282    outfile.variables['y'][:] = y - geo_ref.get_yllcorner()
5283    outfile.variables['elevation'][:] = elevation
5284
5285    stage = outfile.variables['stage']
5286    xmomentum = outfile.variables['xmomentum']
5287    ymomentum = outfile.variables['ymomentum']
5288
5289    if verbose: print 'Converting quantities'
5290    for j in range(len(times)):
5291        for i in range(number_of_points):
5292            ha = mux['HA'][i,j]
5293            ua = mux['UA'][i,j]
5294            va = mux['VA'][i,j]
5295            if ha == NODATA:
5296                if verbose:
5297                    msg = 'Setting nodata value %d to 0 at time = %f, point = %d'\
5298                          %(ha, times[j], i)
5299                    print msg
5300                ha = 0.0
5301                ua = 0.0
5302                va = 0.0
5303               
5304            w = zscale*ha + mean_stage
5305            h=w-elevation[i]
5306            stage[j,i] = w
5307
5308            xmomentum[j,i] = ua*h
5309            ymomentum[j,i] = va*h
5310
5311    outfile.close()
5312
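# A minimal sketch (not part of the API): combine two hypothetical mux2
# sources into gauges.sts, weighting the second source three times as much
# and keeping only the gauges listed in ordering.txt.
def _example_urs2sts():
    urs2sts(['sourceA', 'sourceB'],        # prefixes of the *-mux2 files
            basename_out='gauges',
            weights=[0.25, 0.75],
            ordering_filename='ordering.txt',
            verbose=True)
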
5313def create_sts_boundary(stsname):
    """Create boundary segments from a .sts file. Points can be stored in
    arbitrary order within the .sts file. The order in which the .sts points
    make up the boundary is given in the order.txt file.

    FIXME:
    Point coordinates are stored in relative eastings and northings,
    but the boundary is produced in absolute coordinates.
    """
    try:
        fid = NetCDFFile(stsname+'.sts', 'r')
    except:
        msg = 'Cannot open %s.sts'%stsname
        raise IOError, msg
5328
5329
5330    xllcorner = fid.xllcorner[0]
5331    yllcorner = fid.yllcorner[0]
5332    #Points stored in sts file are normalised to [xllcorner,yllcorner] but
5333    #we cannot assume that boundary polygon will be. At least the
5334    #additional points specified by the user after this function is called
5335    x = fid.variables['x'][:]+xllcorner
5336    y = fid.variables['y'][:]+yllcorner
5337
5338    x = reshape(x, (len(x),1))
5339    y = reshape(y, (len(y),1))
5340    sts_points = concatenate((x,y), axis=1)
5341
5342    return sts_points.tolist()
5343
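# A minimal sketch (not part of the API): read the absolute point
# coordinates back out of the hypothetical file gauges.sts (note that the
# basename is passed without the .sts extension).
def _example_create_sts_boundary():
    for x, y in create_sts_boundary('gauges'):
        print 'boundary point: %.2f, %.2f' %(x, y)
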
5344class Write_sww:
5345    from anuga.shallow_water.shallow_water_domain import Domain
5346
5347    # FIXME (Ole): Hardwiring the conserved quantities like
5348    # this could be a problem. I would prefer taking them from
5349    # the instantiation of Domain.
5350    #
5351    # (DSG) There is not always a Domain instance when Write_sww is used.
5352    # Check to see if this is the same level of hardwiring as is in
    # shallow water domain.
5354   
5355    sww_quantities = Domain.conserved_quantities
5356
5357
5358    RANGE = '_range'
5359    EXTREMA = ':extrema'
5360
5361    def __init__(self):
5362        pass
5363   
5364    def store_header(self,
5365                     outfile,
5366                     times,
5367                     number_of_volumes,
5368                     number_of_points,
5369                     description='Converted from XXX',
5370                     smoothing=True,
5371                     order=1,
5372                     sww_precision=Float32,
5373                     verbose=False):
5374        """
5375        outfile - the name of the file that will be written
5376        times - A list of the time slice times OR a start time
5377        Note, if a list is given the info will be made relative.
5378        number_of_volumes - the number of triangles
5379        """
5380   
5381        outfile.institution = 'Geoscience Australia'
5382        outfile.description = description
5383
5384        # For sww compatibility
        if smoothing is True:
            # Smoothing to be deprecated
            outfile.smoothing = 'Yes'
            outfile.vertices_are_stored_uniquely = 'False'
        else:
            # Smoothing to be deprecated
            outfile.smoothing = 'No'
            outfile.vertices_are_stored_uniquely = 'True'
5393        outfile.order = order
5394
5395        try:
5396            revision_number = get_revision_number()
5397        except:
5398            revision_number = None
5399        # Allow None to be stored as a string               
5400        outfile.revision_number = str(revision_number) 
5401
5402
5403       
5404        # times - A list or array of the time slice times OR a start time
5405        # times = ensure_numeric(times)
5406        # Start time in seconds since the epoch (midnight 1/1/1970)
5407
        # This is being used to separate one number from a list.
        # What it is actually doing is telling lists/arrays apart
        # from plain start-time numbers.
5410        if type(times) is list or type(times) is ArrayType: 
5411            number_of_times = len(times)
5412            times = ensure_numeric(times) 
5413            if number_of_times == 0:
5414                starttime = 0
5415            else:
5416                starttime = times[0]
5417                times = times - starttime  #Store relative times
5418        else:
5419            number_of_times = 0
5420            starttime = times
5421            #times = ensure_numeric([])
5422        outfile.starttime = starttime
5423        # dimension definitions
5424        outfile.createDimension('number_of_volumes', number_of_volumes)
5425        outfile.createDimension('number_of_vertices', 3)
5426        outfile.createDimension('numbers_in_range', 2)
5427   
5428        if smoothing is True:
5429            outfile.createDimension('number_of_points', number_of_points)
5430       
            # FIXME(Ole): This will cause sww files for parallel domains to
5432            # have ghost nodes stored (but not used by triangles).
5433            # To clean this up, we have to change get_vertex_values and
5434            # friends in quantity.py (but I can't be bothered right now)
5435        else:
5436            outfile.createDimension('number_of_points', 3*number_of_volumes)
5437        outfile.createDimension('number_of_timesteps', number_of_times)
5438
5439        # variable definitions
5440        outfile.createVariable('x', sww_precision, ('number_of_points',))
5441        outfile.createVariable('y', sww_precision, ('number_of_points',))
5442        outfile.createVariable('elevation', sww_precision, ('number_of_points',))
5443        q = 'elevation'
5444        outfile.createVariable(q+Write_sww.RANGE, sww_precision,
5445                               ('numbers_in_range',))
5446
5447
5448        # Initialise ranges with small and large sentinels.
5449        # If this was in pure Python we could have used None sensibly
5450        outfile.variables[q+Write_sww.RANGE][0] = max_float  # Min
5451        outfile.variables[q+Write_sww.RANGE][1] = -max_float # Max
5452
5453        # FIXME: Backwards compatibility
5454        outfile.createVariable('z', sww_precision, ('number_of_points',))
5455        #################################
5456
5457        outfile.createVariable('volumes', Int, ('number_of_volumes',
5458                                                'number_of_vertices'))
5459        # Doing sww_precision instead of Float gives cast errors.
5460        outfile.createVariable('time', Float,
5461                               ('number_of_timesteps',))
5462       
5463        for q in Write_sww.sww_quantities:
5464            outfile.createVariable(q, sww_precision,
5465                                   ('number_of_timesteps',
5466                                    'number_of_points')) 
5467            outfile.createVariable(q+Write_sww.RANGE, sww_precision,
5468                                   ('numbers_in_range',))
5469
5470            # Initialise ranges with small and large sentinels.
5471            # If this was in pure Python we could have used None sensibly
5472            outfile.variables[q+Write_sww.RANGE][0] = max_float  # Min
5473            outfile.variables[q+Write_sww.RANGE][1] = -max_float # Max
5474           
5475        if type(times) is list or type(times) is ArrayType: 
5476            outfile.variables['time'][:] = times    #Store time relative
5477           
5478        if verbose:
5479            print '------------------------------------------------'
5480            print 'Statistics:'
5481            print '    t in [%f, %f], len(t) == %d'\
5482                  %(min(times.flat), max(times.flat), len(times.flat))
5483
5484       
5485    def store_triangulation(self,
5486                            outfile,
5487                            points_utm,
5488                            volumes,
5489                            elevation, zone=None, new_origin=None, 
5490                            points_georeference=None, verbose=False):
        """
        new_origin - a georeference that the points can be set to. (Maybe
        do this before calling this function.)
5495       
5496        points_utm - currently a list or array of the points in UTM.
5497        points_georeference - the georeference of the points_utm
5498       
        How about passing new_origin and current_origin.
        If you get both, do a conversion from the old to the new.

        If you only get new_origin, the points are absolute;
        convert to relative.

        If you only get the current_origin, the points are relative; store
        as relative.
5507       
5508        if you get no georefs create a new georef based on the minimums of
5509        points_utm.  (Another option would be to default to absolute)
5510       
5511        Yes, and this is done in another part of the code.
5512        Probably geospatial.
5513       
5514        If you don't supply either geo_refs, then supply a zone. If not
5515        the default zone will be used.
5516       
5517       
5518        precon
5519       
5520        header has been called.
5521        """
5522       
5523        number_of_points = len(points_utm)   
5524        volumes = array(volumes) 
5525        points_utm = array(points_utm)
5526
5527        # given the two geo_refs and the points, do the stuff
5528        # described in the method header
5529        # if this is needed else where, pull out as a function
5530        points_georeference = ensure_geo_reference(points_georeference)
5531        new_origin = ensure_geo_reference(new_origin)
5532        if new_origin is None and points_georeference is not None:
5533            points = points_utm
5534            geo_ref = points_georeference
5535        else:
5536            if new_origin is None:
5537                new_origin = Geo_reference(zone,min(points_utm[:,0]),
5538                                           min(points_utm[:,1]))
5539            points = new_origin.change_points_geo_ref(points_utm,
5540                                                      points_georeference)
5541            geo_ref = new_origin
5542
5543        # At this stage I need a georef and points
5544        # the points are relative to the georef
5545        geo_ref.write_NetCDF(outfile)
5546   
5547        # This will put the geo ref in the middle
5548        #geo_ref=Geo_reference(refzone,(max(x)+min(x))/2.0,(max(x)+min(y))/2.)
5549       
5550        x =  points[:,0]
5551        y =  points[:,1]
5552        z = outfile.variables['z'][:]
5553   
5554        if verbose:
5555            print '------------------------------------------------'
5556            print 'More Statistics:'
            print '  Extent (lat/lon):'
5558            print '    x in [%f, %f], len(lat) == %d'\
5559                  %(min(x), max(x),
5560                    len(x))
5561            print '    y in [%f, %f], len(lon) == %d'\
5562                  %(min(y), max(y),
5563                    len(y))
5564            print '    z in [%f, %f], len(z) == %d'\
5565                  %(min(elevation), max(elevation),
5566                    len(elevation))
5567            print 'geo_ref: ',geo_ref
5568            print '------------------------------------------------'
5569           
5570        #z = resize(bath_grid,outfile.variables['z'][:].shape)
5571        #print "points[:,0]", points[:,0]
5572        outfile.variables['x'][:] = points[:,0] #- geo_ref.get_xllcorner()
5573        outfile.variables['y'][:] = points[:,1] #- geo_ref.get_yllcorner()
5574        outfile.variables['z'][:] = elevation
5575        outfile.variables['elevation'][:] = elevation  #FIXME HACK
5576        outfile.variables['volumes'][:] = volumes.astype(Int32) #On Opteron 64
5577
5578        q = 'elevation'
5579        # This updates the _range values
5580        outfile.variables[q+Write_sww.RANGE][0] = min(elevation)
5581        outfile.variables[q+Write_sww.RANGE][1] = max(elevation)
5582
5583
5584    def store_quantities(self, outfile, sww_precision=Float32,
5585                         slice_index=None, time=None,
5586                         verbose=False, **quant):
5587        """
5588        Write the quantity info.
5589
5590        **quant is extra keyword arguments passed in. These must be
5591          the sww quantities, currently; stage, xmomentum, ymomentum.
5592       
        If the time array has already been built, use the slice_index
        to specify the index.
5595       
5596        Otherwise, use time to increase the time dimension
5597
5598        Maybe make this general, but the viewer assumes these quantities,
5599        so maybe we don't want it general - unless the viewer is general
5600       
5601        precon
5602        triangulation and
5603        header have been called.
5604        """
5605
5606        if time is not None:
5607            file_time = outfile.variables['time']
5608            slice_index = len(file_time)
5609            file_time[slice_index] = time   
5610
5611        # Write the conserved quantities from Domain.
5612        # Typically stage,  xmomentum, ymomentum
5613        # other quantities will be ignored, silently.
5614        # Also write the ranges: stage_range,
5615        # xmomentum_range and ymomentum_range
5616        for q in Write_sww.sww_quantities:
5617            if not quant.has_key(q):
5618                msg = 'SWW file can not write quantity %s' %q
5619                raise NewQuantity, msg
5620            else:
5621                q_values = quant[q]
5622                outfile.variables[q][slice_index] = \
5623                                q_values.astype(sww_precision)
5624
5625                # This updates the _range values
5626                q_range = outfile.variables[q+Write_sww.RANGE][:]
5627                q_values_min = min(q_values)
5628                if q_values_min < q_range[0]:
5629                    outfile.variables[q+Write_sww.RANGE][0] = q_values_min
5630                q_values_max = max(q_values)
5631                if q_values_max > q_range[1]:
5632                    outfile.variables[q+Write_sww.RANGE][1] = q_values_max
5633
5634    def verbose_quantities(self, outfile):
5635        print '------------------------------------------------'
5636        print 'More Statistics:'
5637        for q in Write_sww.sww_quantities:
            print '    %s in [%f, %f]' %(q,
                                         outfile.variables[q+Write_sww.RANGE][0],
                                         outfile.variables[q+Write_sww.RANGE][1])
5641        print '------------------------------------------------'
5642
5643
5644       
5645def obsolete_write_sww_time_slices(outfile, has, uas, vas, elevation,
5646                         mean_stage=0, zscale=1,
5647                         verbose=False):   
5648    #Time stepping
5649    stage = outfile.variables['stage']
5650    xmomentum = outfile.variables['xmomentum']
5651    ymomentum = outfile.variables['ymomentum']
5652
5653    n = len(has)
5654    j=0
5655    for ha, ua, va in map(None, has, uas, vas):
5656        if verbose and j%((n+10)/10)==0: print '  Doing %d of %d' %(j, n)
5657        w = zscale*ha + mean_stage
5658        stage[j] = w
5659        h = w - elevation
5660        xmomentum[j] = ua*h
        ymomentum[j] = -1*va*h  # -1 since in mux files south is positive.
5662        j += 1
5663   
5664def urs2txt(basename_in, location_index=None):
5665    """
5666    Not finished or tested
5667    """
5668   
5669    files_in = [basename_in + WAVEHEIGHT_MUX_LABEL,
5670                basename_in + EAST_VELOCITY_LABEL,
5671                basename_in + NORTH_VELOCITY_LABEL]
5672    quantities = ['HA','UA','VA']
5673
5674    d = ","
5675   
    # Instantiate Urs_points for each of the three mux files.
5677    mux = {}
5678    for quantity, file in map(None, quantities, files_in):
5679        mux[quantity] = Urs_points(file)
5680       
5681    # Could check that the depth is the same. (hashing)
5682
5683    # handle to a mux file to do depth stuff
5684    a_mux = mux[quantities[0]]
5685   
5686    # Convert to utm
5687    latitudes = a_mux.lonlatdep[:,1]
5688    longitudes = a_mux.lonlatdep[:,0]
5689    points_utm, zone = convert_from_latlon_to_utm( \
5690        latitudes=latitudes, longitudes=longitudes)
5691    #print "points_utm", points_utm
5692    #print "zone", zone
    depths = a_mux.lonlatdep[:,2]  # depth, positive down
5694   
5695    fid = open(basename_in + '.txt', 'w')
5696
5697    fid.write("zone: " + str(zone) + "\n")
5698
5699    if location_index is not None:
5700        #Title
5701        li = location_index
5702        fid.write('location_index'+d+'lat'+d+ 'long' +d+ 'Easting' +d+ \
5703                  'Northing' + "\n")
        fid.write(str(li) +d+ str(latitudes[li])+d+ \
              str(longitudes[li]) +d+ str(points_utm[li][0]) +d+ \
              str(points_utm[li][1]) + "\n")
5707
5708    # the non-time dependent stuff
5709    #Title
5710    fid.write('location_index'+d+'lat'+d+ 'long' +d+ 'Easting' +d+ \
5711              'Northing' +d+ 'depth m' + "\n")
5712    i = 0
5713    for depth, point_utm, lat, long in map(None, depths,
5714                                               points_utm, latitudes,
5715                                               longitudes):
5716       
        fid.write(str(i) +d+ str(lat)+d+ str(long) +d+ str(point_utm[0]) +d+ \
                  str(point_utm[1]) +d+ str(depth) + "\n")
5719        i +=1
5720    #Time dependent
5721    if location_index is not None:
5722        time_step = a_mux.time_step
5723        i = 0
5724        #Title
5725        fid.write('time' +d+ 'HA depth m'+d+ \
5726                 'UA momentum East x m/sec' +d+ 'VA momentum North y m/sec' \
5727                      + "\n")
5728        for HA, UA, VA in map(None, mux['HA'], mux['UA'], mux['VA']):
5729            fid.write(str(i*time_step) +d+ str(HA[location_index])+d+ \
5730                      str(UA[location_index]) +d+ str(VA[location_index]) \
5731                      + "\n")
5732           
5733            i +=1
5734
5735class Write_sts:
5736
5737    sts_quantities = ['stage','xmomentum','ymomentum']
5738
5739
5740    RANGE = '_range'
5741    EXTREMA = ':extrema'
5742   
5743    def __init__(self):
5744        pass
5745
5746    def store_header(self,
5747                     outfile,
5748                     times,
5749                     number_of_points,
5750                     description='Converted from URS mux2 format',
5751                     sts_precision=Float32,
5752                     verbose=False):
5753        """
5754        outfile - the name of the file that will be written
5755        times - A list of the time slice times OR a start time
5756        Note, if a list is given the info will be made relative.
        number_of_points - the number of urs gauge sites
5758        """
5759
5760        outfile.institution = 'Geoscience Australia'
5761        outfile.description = description
5762
5763        try:
5764            revision_number = get_revision_number()
5765        except:
5766            revision_number = None
5767        # Allow None to be stored as a string               
5768        outfile.revision_number = str(revision_number) 
5769       
5770        # times - A list or array of the time slice times OR a start time
5771        # Start time in seconds since the epoch (midnight 1/1/1970)
5772
        # This is being used to separate one number from a list.
        # What it is actually doing is telling lists/arrays apart
        # from plain start-time numbers.
5775        if type(times) is list or type(times) is ArrayType: 
5776            number_of_times = len(times)
5777            times = ensure_numeric(times) 
5778            if number_of_times == 0:
5779                starttime = 0
5780            else:
5781                starttime = times[0]
5782                times = times - starttime  #Store relative times
5783        else:
5784            number_of_times = 0
5785            starttime = times
5786
5787        outfile.starttime = starttime
5788
5789        # Dimension definitions
5790        outfile.createDimension('number_of_points', number_of_points)
5791        outfile.createDimension('number_of_timesteps', number_of_times)
5792        outfile.createDimension('numbers_in_range', 2)
5793
5794        # Variable definitions
5795        outfile.createVariable('permutation', Int, ('number_of_points',)) 
5796        outfile.createVariable('x', sts_precision, ('number_of_points',))
5797        outfile.createVariable('y', sts_precision, ('number_of_points',))
5798        outfile.createVariable('elevation', sts_precision,('number_of_points',))
5799
5800        q = 'elevation'
5801        outfile.createVariable(q+Write_sts.RANGE, sts_precision,
5802                               ('numbers_in_range',))
5803
5804        # Initialise ranges with small and large sentinels.
5805        # If this was in pure Python we could have used None sensibly
5806        outfile.variables[q+Write_sts.RANGE][0] = max_float  # Min
5807        outfile.variables[q+Write_sts.RANGE][1] = -max_float # Max
5808
5809        # Doing sts_precision instead of Float gives cast errors.
5810        outfile.createVariable('time', Float, ('number_of_timesteps',))
5811
5812        for q in Write_sts.sts_quantities:
5813            outfile.createVariable(q, sts_precision,
5814                                   ('number_of_timesteps',
5815                                    'number_of_points'))
5816            outfile.createVariable(q+Write_sts.RANGE, sts_precision,
5817                                   ('numbers_in_range',))
5818            # Initialise ranges with small and large sentinels.
5819            # If this was in pure Python we could have used None sensibly
5820            outfile.variables[q+Write_sts.RANGE][0] = max_float  # Min
5821            outfile.variables[q+Write_sts.RANGE][1] = -max_float # Max
5822
5823        if type(times) is list or type(times) is ArrayType: 
5824            outfile.variables['time'][:] = times    #Store time relative
5825
5826        if verbose:
5827            print '------------------------------------------------'
5828            print 'Statistics:'
5829            print '    t in [%f, %f], len(t) == %d'\
5830                  %(min(times.flat), max(times.flat), len(times.flat))
5831
5832    def store_points(self,
5833                     outfile,
5834                     points_utm,
5835                     elevation, zone=None, new_origin=None, 
5836                     points_georeference=None, verbose=False):
5837
5838        """
5839        points_utm - currently a list or array of the points in UTM.
5840        points_georeference - the georeference of the points_utm
5841
        How about passing new_origin and current_origin.
        If you get both, do a conversion from the old to the new.

        If you only get new_origin, the points are absolute;
        convert to relative.

        If you only get the current_origin, the points are relative; store
        as relative.
5850       
5851        if you get no georefs create a new georef based on the minimums of
5852        points_utm.  (Another option would be to default to absolute)
5853       
5854        Yes, and this is done in another part of the code.
5855        Probably geospatial.
5856       
5857        If you don't supply either geo_refs, then supply a zone. If not
5858        the default zone will be used.
5859
5860        precondition:
5861             header has been called.
5862        """
5863
5864        number_of_points = len(points_utm)
5865        points_utm = array(points_utm)
5866
5867        # given the two geo_refs and the points, do the stuff
5868        # described in the method header
5869        points_georeference = ensure_geo_reference(points_georeference)
5870        new_origin = ensure_geo_reference(new_origin)
5871       
5872        if new_origin is None and points_georeference is not None:
5873            points = points_utm
5874            geo_ref = points_georeference
5875        else:
5876            if new_origin is None:
5877                new_origin = Geo_reference(zone,min(points_utm[:,0]),
5878                                           min(points_utm[:,1]))
5879            points = new_origin.change_points_geo_ref(points_utm,
5880                                                      points_georeference)
5881            geo_ref = new_origin
5882
5883        # At this stage I need a georef and points
5884        # the points are relative to the georef
5885        geo_ref.write_NetCDF(outfile)
5886
        x = points[:,0]
        y = points[:,1]
        # Note: unlike sww, the sts header defines no 'z' variable
5890   
5891        if verbose:
5892            print '------------------------------------------------'
5893            print 'More Statistics:'
            print '  Extent (lat/lon):'
5895            print '    x in [%f, %f], len(lat) == %d'\
5896                  %(min(x), max(x),
5897                    len(x))
5898            print '    y in [%f, %f], len(lon) == %d'\
5899                  %(min(y), max(y),
5900                    len(y))
5901            print '    z in [%f, %f], len(z) == %d'\
5902                  %(min(elevation), max(elevation),
5903                    len(elevation))
5904            print 'geo_ref: ',geo_ref
5905            print '------------------------------------------------'
5906           
        #print "points[:,0]", points[:,0]
        outfile.variables['x'][:] = points[:,0] #- geo_ref.get_xllcorner()
        outfile.variables['y'][:] = points[:,1] #- geo_ref.get_yllcorner()
        outfile.variables['elevation'][:] = elevation  #FIXME HACK4
5913
5914        q = 'elevation'
5915        # This updates the _range values
5916        outfile.variables[q+Write_sts.RANGE][0] = min(elevation)
5917        outfile.variables[q+Write_sts.RANGE][1] = max(elevation)
5918
5919    def store_quantities(self, outfile, sts_precision=Float32,
5920                         slice_index=None, time=None,
5921                         verbose=False, **quant):
5922       
5923        """
5924        Write the quantity info.
5925
5926        **quant is extra keyword arguments passed in. These must be
5927          the sts quantities, currently; stage.
5928       
        If the time array has already been built, use the slice_index
        to specify the index.
5931       
5932        Otherwise, use time to increase the time dimension
5933
5934        Maybe make this general, but the viewer assumes these quantities,
5935        so maybe we don't want it general - unless the viewer is general
5936       
5937        precondition:
5938            triangulation and
5939            header have been called.
5940        """
5941        if time is not None:
5942            file_time = outfile.variables['time']
5943            slice_index = len(file_time)
5944            file_time[slice_index] = time   
5945
5946        # Write the conserved quantities from Domain.
5947        # Typically stage,  xmomentum, ymomentum
5948        # other quantities will be ignored, silently.
5949        # Also write the ranges: stage_range
5950        for q in Write_sts.sts_quantities:
5951            if not quant.has_key(q):
5952                msg = 'STS file can not write quantity %s' %q
5953                raise NewQuantity, msg
5954            else:
5955                q_values = quant[q]
5956                outfile.variables[q][slice_index] = \
5957                                q_values.astype(sts_precision)
5958
5959                # This updates the _range values
5960                q_range = outfile.variables[q+Write_sts.RANGE][:]
5961                q_values_min = min(q_values)
5962                if q_values_min < q_range[0]:
5963                    outfile.variables[q+Write_sts.RANGE][0] = q_values_min
5964                q_values_max = max(q_values)
5965                if q_values_max > q_range[1]:
5966                    outfile.variables[q+Write_sts.RANGE][1] = q_values_max
5967
5968
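# A minimal sketch (not part of the API) of the Write_sts call sequence:
# header, then points, then one stage slice. The two gauge locations below
# are made up for illustration.
def _example_write_sts():
    from Numeric import array, Float
    points = [[500000.0, 6000000.0], [500100.0, 6000000.0]]  # UTM gauges
    elevation = array([-20.0, -25.0], Float)
    outfile = NetCDFFile('example.sts', 'w')
    sts = Write_sts()
    sts.store_header(outfile, times=[0.0, 60.0], number_of_points=2)
    sts.store_points(outfile, points, elevation, zone=56)
    sts.store_quantities(outfile, slice_index=0,
                         stage=array([0.1, 0.2], Float),
                         xmomentum=array([0.0, 0.0], Float),
                         ymomentum=array([0.0, 0.0], Float))
    outfile.close()
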
5969   
5970class Urs_points:
    """
    Read the info in URS mux files.

    For the quantities, here's a correlation between the file names and
    what they mean:
    z-mux is height above sea level, m
    e-mux is velocity in Eastern direction, m/s
    n-mux is velocity in Northern direction, m/s
    """
5980    def __init__(self,urs_file):
5981        self.iterated = False
5982        columns = 3 # long, lat , depth
5983        mux_file = open(urs_file, 'rb')
5984       
5985        # Number of points/stations
5986        (self.points_num,)= unpack('i',mux_file.read(4))
5987       
5988        # nt, int - Number of time steps
5989        (self.time_step_count,)= unpack('i',mux_file.read(4))
5990        #print "self.time_step_count", self.time_step_count
5991        #dt, float - time step, seconds
5992        (self.time_step,) = unpack('f', mux_file.read(4))
5993        #print "self.time_step", self.time_step
5994        msg = "Bad data in the urs file."
5995        if self.points_num < 0:
5996            mux_file.close()
5997            raise ANUGAError, msg
5998        if self.time_step_count < 0:
5999            mux_file.close()
6000            raise ANUGAError, msg
6001        if self.time_step < 0:
6002            mux_file.close()
6003            raise ANUGAError, msg
6004
        # The depth is in metres, and it is the distance from the ocean
        # surface to the sea bottom.
6007        lonlatdep = p_array.array('f')
6008        lonlatdep.read(mux_file, columns * self.points_num)
6009        lonlatdep = array(lonlatdep, typecode=Float)   
6010        lonlatdep = reshape(lonlatdep, (self.points_num, columns))
6011        #print 'lonlatdep',lonlatdep
6012        self.lonlatdep = lonlatdep
6013       
6014        self.mux_file = mux_file
6015        # check this array
6016
6017    def __iter__(self):
6018        """
6019        iterate over quantity data which is with respect to time.
6020
        Note: You can only iterate once over an object

        returns quantity information for each time slice
        """
        msg = "You can only iterate once over a urs file."
6026        assert not self.iterated, msg
6027        self.iter_time_step = 0
6028        self.iterated = True
6029        return self
6030   
6031    def next(self):
6032        if self.time_step_count == self.iter_time_step:
6033            self.close()
6034            raise StopIteration
6035        #Read in a time slice  from mux file 
6036        hz_p_array = p_array.array('f')
6037        hz_p_array.read(self.mux_file, self.points_num)
6038        hz_p = array(hz_p_array, typecode=Float)
6039        self.iter_time_step += 1
6040       
6041        return hz_p
6042
6043    def close(self):
6044        self.mux_file.close()
6045       
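# A minimal sketch (not part of the API): iterate once over a hypothetical
# wave-height mux file, printing the peak value of each time slice.
def _example_urs_points():
    mux = Urs_points('tsunami_source-z-mux')   # hypothetical filename
    print 'stations:', mux.points_num, 'timestep:', mux.time_step
    for i, hz in enumerate(mux):
        print 'slice %d: max %f m' %(i, max(hz))
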
6046    #### END URS UNGRIDDED 2 SWW ###
6047
6048       
6049def start_screen_catcher(dir_name=None, myid='', numprocs='', extra_info='',
6050                         verbose=True):
    """
    Used to store screen output and errors to file. If run on multiple
    processes, each processor will have its own output and error file.

    extra_info - a string that can identify outputs with another
    string, eg. '_other'

    FIXME: Would be good if you could suppress all the screen output and
    only save it to file... however it seems a bit tricky as this capture
    technique responds to sys.stdout and by this time it is already printed out.
    """
6062   
6063    import sys
6064#    dir_name = dir_name
    if dir_name is None:
6066        dir_name=getcwd()
6067       
6068    if access(dir_name,W_OK) == 0:
6069        if verbose: print 'Making directory %s' %dir_name
6070      #  if verbose: print "myid", myid
6071        mkdir (dir_name,0777)
6072
    if myid != '':
        myid = '_'+str(myid)
    if numprocs != '':
        numprocs = '_'+str(numprocs)
    if extra_info != '':
        extra_info = '_'+str(extra_info)
6079#    print 'hello1'
6080    screen_output_name = join(dir_name, "screen_output%s%s%s.txt" %(myid,
6081                                                                numprocs,
6082                                                                extra_info))
6083    screen_error_name = join(dir_name,  "screen_error%s%s%s.txt" %(myid,
6084                                                              numprocs,
6085                                                              extra_info))
6086
6087    if verbose: print 'Starting ScreenCatcher, all output will be stored in %s' \
6088                                     %(screen_output_name)
6089    #used to catch screen output to file
6090    sys.stdout = Screen_Catcher(screen_output_name)
6091    sys.stderr = Screen_Catcher(screen_error_name)
6092
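# A minimal sketch (not part of the API): redirect all subsequent print
# statements and tracebacks of process 3 of 8 into the files
# logs/screen_output_3_8.txt and logs/screen_error_3_8.txt.
def _example_start_screen_catcher():
    start_screen_catcher('logs', myid=3, numprocs=8)
    print 'this line goes to the output file, not the terminal'
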
6093class Screen_Catcher:
6094    """this simply catches the screen output and stores it to file defined by
6095    start_screen_catcher (above)
6096    """
6097   
6098    def __init__(self, filename):
6099        self.filename = filename
6100#        print 'init'
        if exists(self.filename):
            print 'Old existing file "%s" has been deleted' %(self.filename)
            remove(self.filename)
6104
6105    def write(self, stuff):
6106        fid = open(self.filename, 'a')
6107        fid.write(stuff)
6108        fid.close()
6109       
6110# FIXME (DSG): Add unit test, make general, not just 2 files,
6111# but any number of files.
6112def copy_code_files(dir_name, filename1, filename2=None):
6113    """Copies "filename1" and "filename2" to "dir_name". Very useful for
6114    information management
6115    filename1 and filename2 are both absolute pathnames   
6116    """
6117
6118    if not access(dir_name, F_OK):
6119        print 'Making directory %s' %dir_name
6120        mkdir(dir_name, 0777)
6121    shutil.copy(filename1, dir_name + sep + basename(filename1))
6122    if filename2 is not None:
6123        shutil.copy(filename2, dir_name + sep + basename(filename2))
6124        print 'Files %s and %s copied' %(filename1, filename2)
6125    else:
6126        print 'File %s copied' %(filename1)
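
# A minimal usage sketch for copy_code_files; the destination directory and
# the two absolute script paths are illustrative and assumed to exist:
#
#     copy_code_files('run_20080101_setup',
#                     '/home/user/run_model.py',
#                     '/home/user/project.py')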
6127
6128def get_data_from_file(filename, separator_value=','):
6129    """
6130    Read in data information from a file.
6131   
6132    Returns:
6133        header_fields, a list of the fields in the first line, split
6134        by the 'separator_value'
6135       
6136        data, an array (M lines x N data columns) of the values in the
6137        file, excluding the header
6138       
6139    NOTE: won't deal with columns of different lengths and there must be
6140    no blank lines at the end.
6141    """
6142   
6143    fid = open(filename)
6144    lines = fid.readlines()
6145   
6146    fid.close()
6147   
6148    header_line = lines[0]
6149    header_fields = header_line.split(separator_value)
6150
6151    # Array to store the data; created empty as Float and then
6152    # resized to the required shape
6153    data = array([], typecode=Float)
6154    data = resize(data, ((len(lines)-1), len(header_fields)))
6158
6159    array_number = 0
6160    line_number = 1
6161    while line_number < len(lines):
6162        # Process the lines below the header; line_number is offset by 1
6163        # from array_number, which indexes the data array
6164        fields = lines[line_number].split(separator_value)
6165        for i in range(len(header_fields)):
6166            # Assign each field to the array
6167            data[array_number, i] = float(fields[i])
6168           
6169        line_number = line_number + 1
6170        array_number = array_number + 1
6171       
6172    return header_fields, data
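
# A self-contained sketch of get_data_from_file: write a small scratch csv
# file (the name 'example_data.csv' is illustrative), read it back and
# clean up:
def _example_get_data_from_file():
    fid = open('example_data.csv', 'w')
    fid.write('time,stage\n0.0,1.5\n1.0,1.7\n')
    fid.close()
    header_fields, data = get_data_from_file('example_data.csv')
    print header_fields    # ['time', 'stage\n'] - note the trailing newline
    print data[1,1]        # 1.7
    remove('example_data.csv')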
6173
6174def store_parameters(verbose=False, **kwargs):
6175    """
6176    Store "kwargs" into a temporary csv file. If "completed" is in kwargs,
6177    the csv file is kwargs['file_name'], else it is
6178    kwargs['output_dir'] + 'detail_temp.csv'.
6179   
6180    If "completed" is given there must be a 'file_name' keyword arg -
6181    this is what is written to. Otherwise an 'output_dir' keyword arg is
6182    required. There might be a better way to do this using the csv
6183    module's Writer and writeDict.
6184    """
6186    import types
6188   
6189    # Check that kwargs is a dictionary
6190    if type(kwargs) != types.DictType:
6191        raise TypeError
6192   
6193    # Is 'completed' in kwargs?
6194    completed = kwargs.has_key('completed')
6199 
6200    # Get the file name and assert that it can be formed
6201    if completed:
6202        try:
6203            file = str(kwargs['file_name'])
6204        except KeyError:
6205            raise KeyError, 'kwargs must have file_name'
6206    else:
6207        # Write temp file in output directory
6208        try:
6209            file = str(kwargs['output_dir'])+'detail_temp.csv'
6210        except KeyError:
6211            raise KeyError, 'kwargs must have output_dir'
6212       
6213    # Extract the header info and the new line info
6214    line = ''
6215    header = ''
6216    count = 0
6217    keys = kwargs.keys()
6218    keys.sort()
6219   
6220    # Use the sorted keys to create the header and line data
6221    for k in keys:
6222        header = header + str(k)
6223        line = line + str(kwargs[k])
6224        count += 1
6225        if count < len(kwargs):
6226            header = header + ','
6227            line = line + ','
6228    header += '\n'
6229    line += '\n'
6231
6232    # Check the header info: if the same, append; if not, create a new file
6233    try:
6234        fid = open(file, "r")
6235        file_header = fid.readline()
6236        fid.close()
6237        if verbose: print 'read file header %s' %file_header
6238       
6239    except IOError:
6240        msg = 'try to create new file %s' %file
6241        if verbose: print msg
6242        # Try to open the file - maybe the directory is bad
6243        try:
6244            fid = open(file, "w")
6245            fid.write(header)
6246            fid.close()
6247            file_header = header
6248        except IOError:
6249            msg = 'cannot create new file %s' %file
6250            raise IOError, msg
6252           
6253    # If the header is the same or this is a new file, append the line
6254    if file_header == str(header):
6255        fid = open(file, "a")
6256        fid.write(line)
6257        fid.close()
6258    else:
6259        # Backup plan: if the header is different, append both header and
6260        # line to the end of the detail_temp.csv file in the
6261        # output directory
6262        file = str(kwargs['output_dir'])+'detail_temp.csv'
6263        fid = open(file, "a")
6264        fid.write(header)
6265        fid.write(line)
6266        fid.close()
6267        if verbose: print 'file', file_header.strip('\n')
6268        if verbose: print 'head', header.strip('\n')
6269        if file_header.strip('\n') == str(header): print 'they equal'
6270        msg = 'WARNING: File header does not match input info - '
6271        msg += 'the input variables have changed; suggest changing file name'
6272        print msg
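
# A minimal usage sketch for store_parameters. Keywords other than
# 'output_dir', 'file_name' and 'completed' are free-form; the values below
# are illustrative. Note that 'output_dir' must end with a path separator
# since the file name is formed by plain string concatenation:
def _example_store_parameters():
    # The first call creates ./detail_temp.csv with a sorted header line
    store_parameters(output_dir='.'+sep, run_label='trial_1', dt=0.5)
    # With 'completed' given, output goes to 'file_name' instead
    store_parameters(file_name='.'+sep+'details.csv', completed=True,
                     run_label='trial_1', dt=0.5)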
6273
6274
6275
6276# ----------------------------------------------
6277# Functions to obtain diagnostics from sww files
6278#-----------------------------------------------
6279
6280def get_mesh_and_quantities_from_file(filename,
6281                                      quantities=None,
6282                                      verbose=False):
6283    """Get and rebuild mesh structure and associated quantities from sww file
6284   
6285    Input:
6286        filename - Name of sww file
6287        quantities - Names of quantities to load
6288
6289    Output:
6290        mesh - instance of class Mesh
6291               (providing mesh structure and intersection functionality)
6292        quantities - arrays with quantity values at each mesh node
6293        time - vector of stored timesteps
6294       
6295       
6296    This function is used by e.g. get_interpolated_quantities_at_polyline_midpoints
6297    """
6298 
6299    # FIXME (Ole): Maybe refactor filefunction using this more fundamental code.
6300   
6301    from Scientific.IO.NetCDF import NetCDFFile
6302    from anuga.abstract_2d_finite_volumes.neighbour_mesh import Mesh
6306
6307    if verbose: print 'Reading from ', filename
6308    fid = NetCDFFile(filename, 'r')    # Open existing file for read
6309    time = fid.variables['time'][:]    # Time vector
6310    time += fid.starttime[0]
6311   
6312    # Get the variables as Numeric arrays
6313    x = fid.variables['x'][:]                   # x-coordinates of nodes
6314    y = fid.variables['y'][:]                   # y-coordinates of nodes
6315    elevation = fid.variables['elevation'][:]   # Elevation
6316    stage = fid.variables['stage'][:]           # Water level
6317    xmomentum = fid.variables['xmomentum'][:]   # Momentum in the x-direction
6318    ymomentum = fid.variables['ymomentum'][:]   # Momentum in the y-direction
6319
6320
6321    # Mesh (nodes (Mx2), triangles (Nx3))
6322    nodes = concatenate( (x[:, NewAxis], y[:, NewAxis]), axis=1 )
6323    triangles = fid.variables['volumes'][:]
6324   
6325    # Get geo_reference
6326    try:
6327        geo_reference = Geo_reference(NetCDFObject=fid)
6328    except AttributeError:
6329        # Sww files don't have to have a geo_ref
6330        geo_reference = None
6331
6332    if verbose: print '    building mesh from sww file %s' %filename
6333    boundary = None
6334
6335    #FIXME (Peter Row): Should this be in mesh?
6336    if fid.smoothing != 'Yes':
6337        nodes = nodes.tolist()
6338        triangles = triangles.tolist()
6339        nodes, triangles, boundary=weed(nodes, triangles, boundary)
6340
6341
6342    try:
6343        mesh = Mesh(nodes, triangles, boundary,
6344                    geo_reference=geo_reference)
6345    except AssertionError, e:
6346        fid.close()
6347        msg = 'Domain could not be created: %s' %e
6348        raise DataDomainError, msg
6349
6350
6351    quantities = {}
6352    quantities['elevation'] = elevation
6353    quantities['stage'] = stage   
6354    quantities['xmomentum'] = xmomentum
6355    quantities['ymomentum'] = ymomentum   
6356
6357    fid.close()
6358    return mesh, quantities, time
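
# A minimal usage sketch, assuming 'model_output.sww' is an existing ANUGA
# output file (the filename is illustrative):
#
#     mesh, quantities, time = \
#         get_mesh_and_quantities_from_file('model_output.sww')
#     print 'Read %d timesteps for %d nodes'\
#           %(len(time), quantities['stage'].shape[1])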
6359
6360
6361   
6362
6363def get_interpolated_quantities_at_polyline_midpoints(filename,
6364                                                      quantity_names=None,
6365                                                      polyline=None, 
6366                                                      verbose=False):
6367    """Get values for quantities interpolated to polyline midpoints from sww file
6368   
6369    Input:
6370        filename - Name of sww file
6371        quantity_names - Names of quantities to load
6372        polyline: Representation of desired cross section - it may contain
6373                  multiple sections allowing for complex shapes. Assume
6374                  absolute UTM coordinates.
6375                  Format [[x0, y0], [x1, y1], ...]             
6376
6377    Output:
6378        segments: list of instances of class Triangle_intersection
6379        interpolation_function: Instance of class Interpolation_function
6380         
6381         
6382      This function is used by get_flow_through_cross_section and
6383      get_energy_through_cross_section         
6384    """
6385   
6386    from anuga.fit_interpolate.interpolate import Interpolation_function
6387
6388    # Get mesh and quantities from sww file
6389    X = get_mesh_and_quantities_from_file(filename,
6390                                          quantities=quantity_names,
6391                                          verbose=verbose)
6392    mesh, quantities, time = X
6393
6394    # Find all intersections and associated triangles.
6395    segments = mesh.get_intersecting_segments(polyline, verbose=verbose)
6396   
6397    # Get midpoints
6398    interpolation_points = segment_midpoints(segments)       
6399
6400    # Interpolate
6401    if verbose:
6402        print 'Interpolating - ',       
6403        print 'total number of interpolation points = %d'\
6404              %len(interpolation_points)
6405   
6406    I = Interpolation_function(time,
6407                               quantities,
6408                               quantity_names=quantity_names,
6409                               vertex_coordinates=mesh.nodes,
6410                               triangles=mesh.triangles,
6411                               interpolation_points=interpolation_points,
6412                               verbose=verbose)
6413                               
6414    return segments, I
6415   
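# A minimal usage sketch, assuming 'model_output.sww' exists and that the
# polyline coordinates (absolute UTM) intersect the mesh - all values are
# illustrative:
#
#     polyline = [[306000.0, 6193000.0], [306500.0, 6193000.0]]
#     segments, I = get_interpolated_quantities_at_polyline_midpoints(
#                       'model_output.sww',
#                       quantity_names=['elevation', 'stage',
#                                       'xmomentum', 'ymomentum'],
#                       polyline=polyline)
#     # I can now be evaluated as I(t, point_id=i) at segment midpoints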
6416
6417def get_flow_through_cross_section(filename,
6418                                   polyline,
6419                                   verbose=False):
6420    """Obtain flow (m^3/s) perpendicular to specified cross section.
6421
6422    Inputs:
6423        filename: Name of sww file
6424        polyline: Representation of desired cross section - it may contain
6425                  multiple sections allowing for complex shapes. Assume
6426                  absolute UTM coordinates.
6427                  Format [[x0, y0], [x1, y1], ...]
6428
6429    Output:
6430        time: All stored times in sww file
6431        Q: Hydrograph of total flow across given segments for all stored times.
6432
6433    The normal flow is computed for each triangle intersected by the polyline
6434    and added up.  If multiple segments at different angles are specified,
6435    the normal flows may partially cancel each other.
6436
6437    The typical usage of this function would be to get flow through a channel,
6438    and the polyline would then be a cross section perpendicular to the flow.
6439
6440    """
6441
6442    quantity_names =['elevation',
6443                     'stage',
6444                     'xmomentum',
6445                     'ymomentum']
6446
6447
6448    # Get values for quantities at each midpoint of poly line from sww file
6449    X = get_interpolated_quantities_at_polyline_midpoints(filename,
6450                                                          quantity_names=quantity_names,
6451                                                          polyline=polyline,
6452                                                          verbose=verbose)   
6453    segments, interpolation_function = X
6454
6455   
6456    # Get vectors for time and interpolation_points
6457    time = interpolation_function.time
6458    interpolation_points = interpolation_function.interpolation_points   
6459
6460    if verbose: print 'Computing hydrograph'
6461    # Compute hydrograph
6462    Q = []
6463    for t in time:
6464        total_flow=0
6465        for i in range(len(interpolation_points)):
6466            elevation, stage, uh, vh = interpolation_function(t, point_id=i)
6467            normal = segments[i].normal
6468
6469            # Inner product of momentum vector with segment normal [m^2/s]
6470            normal_momentum = uh*normal[0] + vh*normal[1] 
6471
6472            # Flow across this segment [m^3/s]
6473            segment_flow = normal_momentum*segments[i].length
6474
6475            # Accumulate
6476            total_flow += segment_flow
6477             
6478
6479        # Store flow at this timestep   
6480        Q.append(total_flow)
6481
6482
6483    return time, Q
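
# A minimal usage sketch, assuming 'channel.sww' exists and the polyline
# crosses the flow (the filename and UTM coordinates are illustrative):
#
#     polyline = [[306000.0, 6193000.0], [306000.0, 6193100.0]]
#     time, Q = get_flow_through_cross_section('channel.sww', polyline)
#     print 'Peak flow %.2f m^3/s at time %.1f s' %(max(Q), time[argmax(Q)])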
6484   
6485
6486def get_energy_through_cross_section(filename,
6487                                     polyline,
6488                                     kind = 'total',
6489                                     verbose=False):
6490    """Obtain average energy head [m] across specified cross section.
6491
6492    Inputs:
6493        polyline: Representation of desired cross section - it may contain
6494                  multiple sections allowing for complex shapes. Assume
6495                  absolute UTM coordinates.
6496                  Format [[x0, y0], [x1, y1], ...]
6497        kind:     Select which energy to compute.
6498                  Options are 'specific' and 'total' (default)
6499
6500    Output:
6501        time, E: All stored times and the average energy [m] across the given segments at those times.
6502
6503    The average velocity is computed for each triangle intersected by
6504    the polyline and averaged weighted by segment lengths.
6505
6506    The typical usage of this function would be to get average energy of
6507    flow in a channel, and the polyline would then be a cross section
6508    perpendicular to the flow.
6509
6510    #FIXME (Ole) - need name for this energy reflecting that its dimension
6511    is [m].
6512    """
6513
6514    from anuga.config import g, epsilon, velocity_protection as h0       
6515           
6516    quantity_names =['elevation',
6517                     'stage',
6518                     'xmomentum',
6519                     'ymomentum']
6520
6521
6522    # Get values for quantities at each midpoint of poly line from sww file
6523    X = get_interpolated_quantities_at_polyline_midpoints(filename,
6524                                                          quantity_names=\
6525                                                          quantity_names,
6526                                                          polyline=polyline,
6527                                                          verbose=verbose)   
6528    segments, interpolation_function = X
6529
6530   
6531    # Get vectors for time and interpolation_points
6532    time = interpolation_function.time
6533    interpolation_points = interpolation_function.interpolation_points   
6534
6535    if verbose: print 'Computing %s energy' %kind
6536   
6537    # Compute total length of polyline for use with weighted averages
6538    total_line_length = 0.0
6539    for segment in segments:
6540        total_line_length += segment.length
6541       
6542    # Compute energy
6543    E = []
6544    for t in time:
6545        average_energy=0.0
6546        for i, p in enumerate(interpolation_points):
6547            elevation, stage, uh, vh = interpolation_function(t, point_id=i)
6548           
6549            # Depth
6550            h = depth = stage-elevation
6551           
6552            # Average velocity across this segment
6553            if h > epsilon:
6554                # Use protection against degenerate velocities
6555                u = uh/(h + h0/h)
6556                v = vh/(h + h0/h)
6557            else:
6558                u = v = 0.0
6559               
6560            speed_squared = u*u + v*v   
6561            kinetic_energy = 0.5*speed_squared/g
6562           
6563            if kind == 'specific':
6564                segment_energy = depth + kinetic_energy
6565            elif kind == 'total':
6566                segment_energy = stage + kinetic_energy               
6567            else:
6568                msg = 'Energy kind must be either "specific" or "total".'
6569                msg += ' I got %s' %kind
6570                raise Exception, msg
6571
6572            # Add to weighted average
6573            weight = segments[i].length/total_line_length
6574            average_energy += segment_energy*weight
6575             
6576
6577        # Store energy at this timestep   
6578        E.append(average_energy)
6579
6580
6581    return time, E
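
# A minimal usage sketch, assuming 'channel.sww' exists; the filename and
# polyline are illustrative. Compare total and specific energy heads:
#
#     polyline = [[306000.0, 6193000.0], [306000.0, 6193100.0]]
#     time, E_total = get_energy_through_cross_section('channel.sww',
#                                                      polyline)
#     time, E_specific = get_energy_through_cross_section('channel.sww',
#                                                         polyline,
#                                                         kind='specific')
#     print 'Max total energy head: %.2f m' %max(E_total)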
6582   
6583
6584
6585
6586
6587def get_maximum_inundation_elevation(filename,
6588                                     polygon=None,
6589                                     time_interval=None,
6590                                     verbose=False):
6591   
6592    """Return highest elevation where depth > 0
6593   
6594    Usage:
6595    max_runup = get_maximum_inundation_elevation(filename,
6596                                                 polygon=None,
6597                                                 time_interval=None,
6598                                                 verbose=False)
6599
6600    filename is a NetCDF sww file containing ANUGA model output.   
6601    Optional arguments polygon and time_interval restrict the maximum
6602    runup calculation to points that lie within the specified polygon
6603    and time interval.
6604
6605    If no inundation is found within polygon and time_interval the return value
6606    is None signifying "No Runup" or "Everything is dry".
6607
6608    See general function get_maximum_inundation_data for details.
6609   
6610    """
6611   
6612    runup, _ = get_maximum_inundation_data(filename,
6613                                           polygon=polygon,
6614                                           time_interval=time_interval,
6615                                           verbose=verbose)
6616    return runup
6617
6618
6619
6620
6621def get_maximum_inundation_location(filename,
6622                                    polygon=None,
6623                                    time_interval=None,
6624                                    verbose=False):
6625    """Return location of highest elevation where h > 0
6626   
6627   
6628    Usage:
6629    max_runup_location = get_maximum_inundation_location(filename,
6630                                                         polygon=None,
6631                                                         time_interval=None,
6632                                                         verbose=False)
6633
6634    filename is a NetCDF sww file containing ANUGA model output.
6635    Optional arguments polygon and time_interval restrict the maximum
6636    runup calculation to points that lie within the specified polygon
6637    and time interval.
6638
6639    If no inundation is found within polygon and time_interval the return value
6640    is None signifying "No Runup" or "Everything is dry".
6641
6642    See general function get_maximum_inundation_data for details.
6643    """
6644   
6645    _, max_loc = get_maximum_inundation_data(filename,
6646                                             polygon=polygon,
6647                                             time_interval=time_interval,
6648                                             verbose=verbose)
6649    return max_loc
6650   
6651
6652
6653def get_maximum_inundation_data(filename, polygon=None, time_interval=None,
6654                                use_centroid_values=False,
6655                                verbose=False):
6656    """Compute maximum run up height from sww file.
6657
6658
6659    Usage:
6660    runup, location = get_maximum_inundation_data(filename,
6661                                                  polygon=None,
6662                                                  time_interval=None,
6663                                                  verbose=False)
6664   
6665
6666    Algorithm is as in get_maximum_inundation_elevation from
6667    shallow_water_domain
6668    except that this function works with the sww file and computes the maximal
6669    runup height over multiple timesteps.
6670   
6671    Optional arguments polygon and time_interval restrict the maximum
6672    runup calculation to points that lie within the specified polygon
6673    and time interval. Polygon is assumed to be in (absolute) UTM
6674    coordinates in the same zone as the domain.
6676
6677    If no inundation is found within polygon and time_interval the return value
6678    is None signifying "No Runup" or "Everything is dry".
6679    """
6680
6681    # We are using nodal values here as that is what is stored in sww files.
6682
6683    # Water depth below which it is considered to be 0 in the model
6684    # FIXME (Ole): Allow this to be specified as a keyword argument as well
6685
6686    from anuga.utilities.polygon import inside_polygon   
6687    from anuga.config import minimum_allowed_height
6688    from Scientific.IO.NetCDF import NetCDFFile
6689
6690    dir, base = os.path.split(filename)
6691           
6692    iterate_over = get_all_swwfiles(dir,base)
6693   
6694    # Read sww file
6695    if verbose: 
6696        print 'Reading from %s' %filename
6697        # FIXME: Use general swwstats (when done)
6698   
6699    maximal_runup = None
6700    maximal_runup_location = None
6701   
6702    for swwfile in iterate_over:
6703       
6704        # Read sww file
6705        filename = join(dir,swwfile+'.sww')
6706       
6707        if verbose: 
6708            print 'Reading from %s' %filename
6709            # FIXME: Use general swwstats (when done)
6710               
6711        fid = NetCDFFile(filename)
6712   
6713        # Get geo_reference
6714        # sww files don't have to have a geo_ref
6715        try:
6716            geo_reference = Geo_reference(NetCDFObject=fid)
6717        except AttributeError, e:
6718            geo_reference = Geo_reference() # Default georef object
6719           
6720        xllcorner = geo_reference.get_xllcorner()
6721        yllcorner = geo_reference.get_yllcorner()
6722        zone = geo_reference.get_zone()
6723       
6724        # Get extent
6725        volumes = fid.variables['volumes'][:]   
6726        x = fid.variables['x'][:] + xllcorner
6727        y = fid.variables['y'][:] + yllcorner
6728   
6729   
6730        # Get the relevant quantities (Convert from single precision)
6731        elevation = array(fid.variables['elevation'][:], Float) 
6732        stage = array(fid.variables['stage'][:], Float)
6733   
6734   
6735        # Here's where one could convert nodal information to centroid
6736        # information
6737        # but is probably something we need to write in C.
6738        # Here's a Python thought which is NOT finished!!!
6739        if use_centroid_values is True:
6740            x = get_centroid_values(x, volumes)
6741            y = get_centroid_values(y, volumes)   
6742            elevation = get_centroid_values(elevation, volumes)   
6743   
6744   
6745        # Spatial restriction
6746        if polygon is not None:
6747            msg = 'polygon must be a sequence of points.'
6748            assert len(polygon[0]) == 2, msg
6749            # FIXME (Ole): Make a generic polygon input check in polygon.py
6750            # and call it here
6751           
6752            points = concatenate((x[:,NewAxis], y[:,NewAxis]), axis=1)
6753   
6754            point_indices = inside_polygon(points, polygon)
6755   
6756            # Restrict quantities to polygon
6757            elevation = take(elevation, point_indices)
6758            stage = take(stage, point_indices, axis=1)
6759   
6760            # Get info for location of maximal runup
6761            points_in_polygon = take(points, point_indices)
6762            x = points_in_polygon[:,0]
6763            y = points_in_polygon[:,1]       
6764        else:
6765            # Take all points
6766            point_indices = arange(len(x))
6767           
6768   
6769        # Temporal restriction
6770        time = fid.variables['time'][:]
6771        all_timeindices = arange(len(time))       
6772        if time_interval is not None:
6773           
6774            msg = 'time_interval must be a sequence of length 2.'
6775            assert len(time_interval) == 2, msg
6776            msg = 'time_interval %s must not be decreasing.' %(time_interval)
6777            assert time_interval[1] >= time_interval[0], msg
6778           
6779            msg = 'Specified time interval [%.8f:%.8f]' %tuple(time_interval)
6780            msg += ' does not match model time interval: [%.8f, %.8f]\n'\
6781                   %(time[0], time[-1])
6782            if time_interval[1] < time[0]: raise ValueError(msg)
6783            if time_interval[0] > time[-1]: raise ValueError(msg)
6784   
6785            # Take time indices corresponding to interval (& is bitwise AND)
6786            timesteps = compress((time_interval[0] <= time) & (time <= time_interval[1]),
6787                                 all_timeindices)
6788   
6789   
6790            msg = 'time_interval %s did not include any model timesteps.' %(time_interval)
6791            assert len(timesteps) > 0, msg
6792   
6793   
6794        else:
6795            # Take them all
6796            timesteps = all_timeindices
6797       
6798   
6799        fid.close()
6800   
6801        # Compute maximal runup for each timestep
6806       
6807        for i in timesteps:
6808            if use_centroid_values is True:
6809                stage_i = get_centroid_values(stage[i,:], volumes)   
6810            else:
6811                stage_i = stage[i,:]
6812               
6813            depth = stage_i  - elevation
6814       
6815            # Get wet nodes i.e. nodes with depth>0 within given region and timesteps
6816            wet_nodes = compress(depth > minimum_allowed_height, arange(len(depth)))
6817   
6818            if len(wet_nodes) == 0:
6819                runup = None
6820            else:   
6821                # Find maximum elevation among wet nodes
6822                wet_elevation = take(elevation, wet_nodes)
6823   
6824                runup_index = argmax(wet_elevation)
6825                runup = max(wet_elevation)
6826                assert wet_elevation[runup_index] == runup # Must be True
6827            #print "runup", runup
6828            #print "maximal_runup", maximal_runup
6829           
6830            if runup > maximal_runup:
6831                maximal_runup = runup   # This works even if maximal_runup is None
6833   
6834                # Record location
6835                wet_x = take(x, wet_nodes)
6836                wet_y = take(y, wet_nodes)           
6837                maximal_runup_location = [wet_x[runup_index], wet_y[runup_index]]
6838   
6840    return maximal_runup, maximal_runup_location
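
# A minimal usage sketch for the three inundation functions above, assuming
# 'model_output.sww' exists; the polygon and time interval are illustrative:
#
#     polygon = [[306000.0, 6193000.0], [307000.0, 6193000.0],
#                [307000.0, 6194000.0], [306000.0, 6194000.0]]
#     runup, location = get_maximum_inundation_data('model_output.sww',
#                                                   polygon=polygon,
#                                                   time_interval=[0, 3600])
#     if runup is None:
#         print 'Everything is dry'
#     else:
#         print 'Runup %.2f m at (%.1f, %.1f)' %(runup, location[0],
#                                                location[1])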
6841
6842def get_all_swwfiles(look_in_dir='',base_name='',verbose=False):
6843    '''
6844    Finds all the sww files in "look_in_dir" whose names contain "base_name".
6845    Will accept base_name with or without the extension ".sww".
6846   
6847    Returns: a list of strings
6848       
6849    Usage:     iterate_over = get_all_swwfiles(dir, name)
6850    then
6851               for swwfile in iterate_over:
6852                   do stuff
6853                   
6854    Check "export_grids" and "get_maximum_inundation_data" for examples
6855    '''
6856   
6857    # Separate any extension from base_name and check it
6858    name, extension = os.path.splitext(base_name)
6859
6860    if extension != '' and extension != '.sww':
6861        msg = 'file %s %s must be a NetCDF sww file!' %(base_name, extension)
6862        raise IOError, msg
6863
6864    if look_in_dir == "":
6865        look_in_dir = "." # Unix compatibility
6866   
6867    dir_ls = os.listdir(look_in_dir)
6869    iterate_over = [x[:-4] for x in dir_ls if name in x and x[-4:] == '.sww']
6870    if len(iterate_over) == 0:
6871        msg = 'No files of the base name %s'\
6872              %(name)
6873        raise IOError, msg
6874    if verbose: print 'iterate over %s' %(iterate_over)
6875
6876    return iterate_over
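
# A minimal usage sketch, assuming the current directory holds output files
# such as 'runup_P0_8.sww' (the names are illustrative):
#
#     for swwfile in get_all_swwfiles('.', 'runup'):
#         print swwfile    # base name without the '.sww' extension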
6877
6878def get_all_files_with_extension(look_in_dir='',base_name='',extension='.sww',verbose=False):
6879    '''
6880    Finds all files with the given "extension" in "look_in_dir" whose
6881    names contain "base_name".
6882   
6883    Returns: a list of strings
6884       
6885    Usage:     iterate_over = get_all_files_with_extension(dir, name, ext)
6886    then
6887               for filename in iterate_over:
6888                   do stuff
6889                   
6890    Check "export_grids" and "get_maximum_inundation_data" for examples
6891    '''
6892   
6893    # Separate any extension from base_name and check it
6894    name, ext = os.path.splitext(base_name)
6896
6897    if ext != '' and ext != extension:
6898        msg = 'base_name %s must be a file with %s extension!' %(base_name, extension)
6899        raise IOError, msg
6900
6901    if look_in_dir == "":
6902        look_in_dir = "." # Unix compatibility
6904    dir_ls = os.listdir(look_in_dir)
6906    iterate_over = [x[:-4] for x in dir_ls if name in x and x[-4:] == extension]
6907    if len(iterate_over) == 0:
6908        msg = 'No files of the base name %s in %s'\
6909              %(name, look_in_dir)
6910        raise IOError, msg
6911    if verbose: print 'iterate over %s' %(iterate_over)
6912
6913    return iterate_over
6914
6915def get_all_directories_with_name(look_in_dir='',base_name='',verbose=False):
6916    '''
6917    Finds all the directories in "look_in_dir" whose names contain
6918    "base_name".
6919   
6920    Returns: a list of strings
6921       
6922    Usage:     iterate_over = get_all_directories_with_name(dir, name)
6923    then
6924               for dirname in iterate_over:
6925                   do stuff
6926                   
6927    Check "export_grids" and "get_maximum_inundation_data" for examples
6928    '''
6929   
6932    if look_in_dir == "":
6933        look_in_dir = "." # Unix compatibility
6934    dir_ls = os.listdir(look_in_dir)
6937    iterate_over = [x for x in dir_ls if base_name in x]
6938    if len(iterate_over) == 0:
6939        msg = 'No directories with the base name %s'\
6940              %(base_name)
6941        raise IOError, msg
6942    if verbose: print 'iterate over %s' %(iterate_over)
6943
6944    return iterate_over
6945
6946def points2polygon(points_file,
6947                   minimum_triangle_angle=3.0):
6948    """
6949    WARNING: This function is not fully working. 
6950   
6951    Function to return a polygon derived from an alpha shape, given a points file.
6952   
6953    WARNING: Alpha shape may return multiple polygons; this function only returns one.
6954   
6955    """
6956    from anuga.pmesh.mesh import Mesh, importMeshFromFile
6957    from anuga.shallow_water import Domain   
6958    from anuga.pmesh.mesh_interface import create_mesh_from_regions
6959   
6960    mesh = importMeshFromFile(points_file)
6961    mesh.auto_segment()
6962    mesh.exportASCIIsegmentoutlinefile("outline.tsh")
6963    mesh2 = importMeshFromFile("outline.tsh")
6964    mesh2.generate_mesh(maximum_triangle_area=1000000000, minimum_triangle_angle=minimum_triangle_angle, verbose=False)
6965    mesh2.export_mesh_file('outline_meshed.tsh')
6966    domain = Domain("outline_meshed.tsh", use_cache = False)
6967    polygon =  domain.get_boundary_polygon()
6968    return polygon
6969
6970#-------------------------------------------------------------
6971if __name__ == "__main__":
6972    # Set umask from config to force the same permissions for all files
6973    # and directories created. (It was noticed that "mpirun" doesn't
6974    # honour the umask set in your .bashrc etc file.)
6975    from config import umask
6976    import os 
6977    os.umask(umask)