source: anuga_core/source/anuga/shallow_water/data_manager.py @ 5606

Last change r5606, checked in by kristy: Converting quantities: changing NODATA to 0.0 for ha, ua and va (stage, xmomentum, ymomentum)

1"""datamanager.py - input output for AnuGA
2
3
4This module takes care of reading and writing datafiles such as topograhies,
5model output, etc
6
7
8Formats used within AnuGA:
9
10.sww: Netcdf format for storing model output f(t,x,y)
11.tms: Netcdf format for storing time series f(t)
12
13.csv: ASCII format for storing arbitrary points and associated attributes
14.pts: NetCDF format for storing arbitrary points and associated attributes
15
16.asc: ASCII format of regular DEMs as output from ArcView
17.prj: Associated ArcView file giving more meta data for asc format
18.ers: ERMapper header format of regular DEMs for ArcView
19
20.dem: NetCDF representation of regular DEM data
21
22.tsh: ASCII format for storing meshes and associated boundary and region info
23.msh: NetCDF format for storing meshes and associated boundary and region info
24
25.nc: Native ferret NetCDF format
26.geo: Houdinis ascii geometry format (?)
27
28
29A typical dataflow can be described as follows
30
31Manually created files:
32ASC, PRJ:     Digital elevation models (gridded)
33TSH:          Triangular meshes (e.g. created from anuga.pmesh)
34NC            Model outputs for use as boundary conditions (e.g from MOST)
35
36
37AUTOMATICALLY CREATED FILES:
38
39ASC, PRJ  ->  DEM  ->  PTS: Conversion of DEM's to native pts file
40
41NC -> SWW: Conversion of MOST bundary files to boundary sww
42
43PTS + TSH -> TSH with elevation: Least squares fit
44
45TSH -> SWW:  Conversion of TSH to sww viewable using Swollen
46
47TSH + Boundary SWW -> SWW: Simluation using abstract_2d_finite_volumes
48
49"""
50
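# A hedged sketch of the dataflow above, using conversion functions defined
# later in this module (basenames are illustrative):
#
#   dem2pts('topography')   # topography.dem -> topography.pts
#   export_grid('runup', quantities=['stage'], format='asc')
#                           # runup.sww -> runup_stage.asc per sww file
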
# This file was reverted from changeset:5484 to changeset:5470 on 10th July
# by Ole.

import exceptions
class TitleValueError(exceptions.Exception): pass
class DataMissingValuesError(exceptions.Exception): pass
class DataFileNotOpenError(exceptions.Exception): pass
class DataTimeError(exceptions.Exception): pass
class DataDomainError(exceptions.Exception): pass
class NewQuantity(exceptions.Exception): pass



import csv
import os, sys
import shutil
from struct import unpack
import array as p_array
#import time, os
from os import sep, path, remove, mkdir, access, F_OK, W_OK, getcwd


from Numeric import concatenate, array, Float, Int, Int32, resize, \
     sometrue, searchsorted, zeros, allclose, around, reshape, \
     transpose, sort, NewAxis, ArrayType, compress, take, arange, \
     argmax, alltrue, shape, Float32, size

import string

from Scientific.IO.NetCDF import NetCDFFile
#from shutil import copy
from os.path import exists, basename, join
from os import getcwd


from anuga.coordinate_transforms.redfearn import redfearn, \
     convert_from_latlon_to_utm
from anuga.coordinate_transforms.geo_reference import Geo_reference, \
     write_NetCDF_georeference, ensure_geo_reference
from anuga.geospatial_data.geospatial_data import Geospatial_data,\
     ensure_absolute
from anuga.config import minimum_storable_height as default_minimum_storable_height
from anuga.config import max_float
from anuga.utilities.numerical_tools import ensure_numeric, mean
from anuga.caching.caching import myhash
from anuga.utilities.anuga_exceptions import ANUGAError
from anuga.shallow_water import Domain
from anuga.abstract_2d_finite_volumes.pmesh2domain import \
     pmesh_to_domain_instance
from anuga.abstract_2d_finite_volumes.util import get_revision_number, \
     remove_lone_verts, sww2timeseries, get_centroid_values
from anuga.load_mesh.loadASCII import export_mesh_file
from anuga.utilities.polygon import intersection


# formula mappings

quantity_formula = {'momentum':'(xmomentum**2 + ymomentum**2)**0.5',
                    'depth':'stage-elevation',
                    'speed': \
 '(xmomentum**2 + ymomentum**2)**0.5/(stage-elevation+1.e-6/(stage-elevation))'}

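# A minimal sketch of how these formula strings can be evaluated against
# Numeric arrays (the arrays and the function below are illustrative only,
# not part of this module's API):
def _example_evaluate_quantity_formula():
    stage = array([1.0, 2.0, 3.0])
    elevation = array([0.5, 0.5, 0.5])
    xmomentum = array([0.1, 0.2, 0.3])
    ymomentum = array([0.0, 0.0, 0.0])

    # eval resolves the quantity names from the local namespace
    depth = eval(quantity_formula['depth'])        # -> [0.5, 1.5, 2.5]
    momentum = eval(quantity_formula['momentum'])  # -> [0.1, 0.2, 0.3]
    return depth, momentum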


def make_filename(s):
    """Transform argument string into a suitable filename
    """

    s = s.strip()
    s = s.replace(' ', '_')
    s = s.replace('(', '')
    s = s.replace(')', '')
    s = s.replace('__', '_')

    return s

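# Example (a sketch of the transformation above):
#   make_filename(' Output (case 1) ')  ->  'Output_case_1'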

def check_dir(path, verbose=None):
    """Check that specified path exists.
    If path does not exist it will be created if possible

    USAGE:
       check_dir(path, verbose):

    ARGUMENTS:
        path -- Directory
        verbose -- Flag verbose output (default: None)

    RETURN VALUE:
        Verified path including trailing separator

    """

    import os.path

    if sys.platform in ['nt', 'dos', 'win32', 'what else?']:
        unix = 0
    else:
        unix = 1


    if path[-1] != os.sep:
        path = path + os.sep  # Add separator for directories

    path = os.path.expanduser(path) # Expand ~ or ~user in pathname
    # Check both read and write access
    if not (os.access(path, os.R_OK | os.W_OK) or path == ''):
        try:
            exitcode = os.mkdir(path)

            # Change access rights if possible
            #
            if unix:
                exitcode = os.system('chmod 775 ' + path)
            else:
                pass  # FIXME: What about access rights under Windows?

            if verbose: print 'MESSAGE: Directory', path, 'created.'

        except:
            print 'WARNING: Directory', path, 'could not be created.'
            if unix:
                path = '/tmp/'
            else:
                path = 'C:'

            print 'Using directory %s instead' %path

    return(path)



def del_dir(path):
    """Recursively delete directory path and all its contents
    """

    import os

    if os.path.isdir(path):
        for file in os.listdir(path):
            X = os.path.join(path, file)


            if os.path.isdir(X) and not os.path.islink(X):
                del_dir(X)
            else:
                try:
                    os.remove(X)
                except:
                    print "Could not remove file %s" %X

        os.rmdir(path)


# ANOTHER OPTION, IF NEEDED IN THE FUTURE, Nick B 7/2007
def rmgeneric(path, __func__, verbose=False):
    ERROR_STR = """Error removing %(path)s, %(error)s """

    try:
        __func__(path)
        if verbose: print 'Removed ', path
    except OSError, (errno, strerror):
        print ERROR_STR % {'path' : path, 'error': strerror }

def removeall(path, verbose=False):

    if not os.path.isdir(path):
        return

    files = os.listdir(path)

    for x in files:
        fullpath = os.path.join(path, x)
        if os.path.isfile(fullpath):
            f = os.remove
            rmgeneric(fullpath, f)
        elif os.path.isdir(fullpath):
            removeall(fullpath)
            f = os.rmdir
            rmgeneric(fullpath, f, verbose)



def create_filename(datadir, filename, format, size=None, time=None):

    import os
    #from anuga.config import data_dir

    FN = check_dir(datadir) + filename

    if size is not None:
        FN += '_size%d' %size

    if time is not None:
        FN += '_time%.2f' %time

    FN += '.' + format
    return FN

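# Example (a sketch; arguments illustrative):
#   create_filename('swwfiles', 'runup', 'sww', size=4, time=1.5)
#   ->  'swwfiles/runup_size4_time1.50.sww'   (on unix)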

def get_files(datadir, filename, format, size):
    """Get all file (names) with given name, size and format
    """

    import glob

    import os
    #from anuga.config import data_dir

    dir = check_dir(datadir)

    pattern = dir + os.sep + filename + '_size=%d*.%s' %(size, format)
    return glob.glob(pattern)



#Generic class for storing output to e.g. visualisation or checkpointing
class Data_format:
    """Generic interface to data formats
    """


    def __init__(self, domain, extension, mode = 'w'):
        assert mode in ['r', 'w', 'a'], '''Mode %s must be either:''' %mode +\
                                        '''   'w' (write)'''+\
                                        '''   'r' (read)''' +\
                                        '''   'a' (append)'''

        #Create filename
        self.filename = create_filename(domain.get_datadir(),
                                        domain.get_name(), extension)

        #print 'F', self.filename
        self.timestep = 0
        self.domain = domain



        # Exclude ghosts in case this is a parallel domain
        self.number_of_nodes = domain.number_of_full_nodes
        self.number_of_volumes = domain.number_of_full_triangles
        #self.number_of_volumes = len(domain)




        #FIXME: Should we have a general set_precision function?



#Class for storing output to e.g. visualisation
class Data_format_sww(Data_format):
    """Interface to native NetCDF format (.sww) for storing model output

    There are two kinds of data

    1: Constant data: Vertex coordinates and field values. Stored once
    2: Variable data: Conserved quantities. Stored once per timestep.

    All data is assumed to reside at vertex locations.
    """


    def __init__(self, domain, mode = 'w',\
                 max_size = 2000000000,
                 recursion = False):
        from Scientific.IO.NetCDF import NetCDFFile
        from Numeric import Int, Float, Float32

        self.precision = Float32 #Use single precision for quantities
        if hasattr(domain, 'max_size'):
            self.max_size = domain.max_size #file size max is 2Gig
        else:
            self.max_size = max_size
        self.recursion = recursion
        self.mode = mode

        Data_format.__init__(self, domain, 'sww', mode)

        if hasattr(domain, 'minimum_storable_height'):
            self.minimum_storable_height = domain.minimum_storable_height
        else:
            self.minimum_storable_height = default_minimum_storable_height

        # NetCDF file definition
        fid = NetCDFFile(self.filename, mode)

        if mode == 'w':
            description = 'Output from anuga.abstract_2d_finite_volumes suitable for plotting'
            self.writer = Write_sww()
            self.writer.store_header(fid,
                                     domain.starttime,
                                     self.number_of_volumes,
                                     self.domain.number_of_full_nodes,
                                     description=description,
                                     smoothing=domain.smooth,
                                     order=domain.default_order,
                                     sww_precision=self.precision)

            # Extra optional information
            if hasattr(domain, 'texture'):
                fid.texture = domain.texture

            if domain.quantities_to_be_monitored is not None:
                fid.createDimension('singleton', 1)
                fid.createDimension('two', 2)

                poly = domain.monitor_polygon
                if poly is not None:
                    N = len(poly)
                    fid.createDimension('polygon_length', N)
                    fid.createVariable('extrema.polygon',
                                       self.precision,
                                       ('polygon_length',
                                        'two'))
                    fid.variables['extrema.polygon'][:] = poly


                interval = domain.monitor_time_interval
                if interval is not None:
                    fid.createVariable('extrema.time_interval',
                                       self.precision,
                                       ('two',))
                    fid.variables['extrema.time_interval'][:] = interval


                for q in domain.quantities_to_be_monitored:
                    #print 'doing', q
                    fid.createVariable(q+'.extrema', self.precision,
                                       ('numbers_in_range',))
                    fid.createVariable(q+'.min_location', self.precision,
                                       ('numbers_in_range',))
                    fid.createVariable(q+'.max_location', self.precision,
                                       ('numbers_in_range',))
                    fid.createVariable(q+'.min_time', self.precision,
                                       ('singleton',))
                    fid.createVariable(q+'.max_time', self.precision,
                                       ('singleton',))


        fid.close()


    def store_connectivity(self):
        """Specialisation of store_connectivity for NetCDF format

        Writes x,y,z coordinates of triangles constituting
        the bed elevation.
        """

        from Scientific.IO.NetCDF import NetCDFFile

        from Numeric import concatenate, Int

        domain = self.domain

        #Get NetCDF
        fid = NetCDFFile(self.filename, 'a')  #Open existing file for append

        # Get the variables
        x = fid.variables['x']
        y = fid.variables['y']
        z = fid.variables['elevation']

        volumes = fid.variables['volumes']

        # Get X, Y and bed elevation Z
        Q = domain.quantities['elevation']
        X,Y,Z,V = Q.get_vertex_values(xy=True,
                                      precision=self.precision)

        #
        points = concatenate( (X[:,NewAxis],Y[:,NewAxis]), axis=1 )
        self.writer.store_triangulation(fid,
                                        points,
                                        V.astype(volumes.typecode()),
                                        Z,
                                        points_georeference= \
                                        domain.geo_reference)

        # Close
        fid.close()


    def store_timestep(self, names=None):
        """Store time and named quantities to file
        """

        from Scientific.IO.NetCDF import NetCDFFile
        import types
        from time import sleep
        from os import stat

        from Numeric import choose


        if names is None:
            # Standard shallow water wave equation quantities in ANUGA
            names = ['stage', 'xmomentum', 'ymomentum']

        # Get NetCDF
        retries = 0
        file_open = False
        while not file_open and retries < 10:
            try:
                fid = NetCDFFile(self.filename, 'a') # Open existing file
            except IOError:
                # This could happen if someone was reading the file.
                # In that case, wait a while and try again
                msg = 'Warning (store_timestep): File %s could not be opened'\
                      %self.filename
                msg += ' - trying step %s again' %self.domain.time
                print msg
                retries += 1
                sleep(1)
            else:
                file_open = True

        if not file_open:
            msg = 'File %s could not be opened for append' %self.filename
            raise DataFileNotOpenError, msg



        # Check to see if the file is already too big:
        time = fid.variables['time']
        i = len(time)+1
        file_size = stat(self.filename)[6]
        file_size_increase = file_size/i
        if file_size + file_size_increase > self.max_size*(2**self.recursion):
            # In order to get the file name and start time correct,
            # I change the domain.filename and domain.starttime.
            # This is the only way to do this without changing
            # other modules (I think).

            # Write a filename addon that won't break Swollen's reader
            # (10.sww is bad)
            filename_ext = '_time_%s'%self.domain.time
            filename_ext = filename_ext.replace('.', '_')

            # Remember the old filename, then give domain a
            # name with the extension
            old_domain_filename = self.domain.get_name()
            if not self.recursion:
                self.domain.set_name(old_domain_filename+filename_ext)


            # Change the domain starttime to the current time
            old_domain_starttime = self.domain.starttime
            self.domain.starttime = self.domain.time

            # Build a new data_structure.
            next_data_structure=\
                Data_format_sww(self.domain, mode=self.mode,\
                                max_size = self.max_size,\
                                recursion = self.recursion+1)
            if not self.recursion:
                print '    file_size = %s'%file_size
                print '    saving file to %s'%next_data_structure.filename
            #set up the new data_structure
            self.domain.writer = next_data_structure

            #FIXME - could be cleaner to use domain.store_timestep etc.
            next_data_structure.store_connectivity()
            next_data_structure.store_timestep(names)
            fid.sync()
            fid.close()

            #restore the old starttime and filename
            self.domain.starttime = old_domain_starttime
            self.domain.set_name(old_domain_filename)
        else:
            self.recursion = False
            domain = self.domain

            # Get the variables
            time = fid.variables['time']
            stage = fid.variables['stage']
            xmomentum = fid.variables['xmomentum']
            ymomentum = fid.variables['ymomentum']
            i = len(time)
            if type(names) not in [types.ListType, types.TupleType]:
                names = [names]

            if 'stage' in names and 'xmomentum' in names and \
               'ymomentum' in names:

                # Get stage, elevation, depth and select only those
                # values where minimum_storable_height is exceeded
                Q = domain.quantities['stage']
                A, _ = Q.get_vertex_values(xy = False,
                                           precision = self.precision)
                z = fid.variables['elevation']

                storable_indices = A-z[:] >= self.minimum_storable_height
                stage = choose(storable_indices, (z[:], A))

                # Define a zero vector of same size and type as A
                # for use with momenta
                null = zeros(size(A), A.typecode())

                # Get xmomentum where depth exceeds minimum_storable_height
                Q = domain.quantities['xmomentum']
                xmom, _ = Q.get_vertex_values(xy = False,
                                              precision = self.precision)
                xmomentum = choose(storable_indices, (null, xmom))


                # Get ymomentum where depth exceeds minimum_storable_height
                Q = domain.quantities['ymomentum']
                ymom, _ = Q.get_vertex_values(xy = False,
                                              precision = self.precision)
                ymomentum = choose(storable_indices, (null, ymom))

                # Write quantities to NetCDF
                self.writer.store_quantities(fid,
                                             time=self.domain.time,
                                             sww_precision=self.precision,
                                             stage=stage,
                                             xmomentum=xmomentum,
                                             ymomentum=ymomentum)
            else:
                msg = 'Quantities stored must be: stage, xmomentum, ymomentum.'
                msg += ' Instead I got: ' + str(names)
                raise Exception, msg



            # Update extrema if requested
            domain = self.domain
            if domain.quantities_to_be_monitored is not None:
                for q, info in domain.quantities_to_be_monitored.items():

                    if info['min'] is not None:
                        fid.variables[q + '.extrema'][0] = info['min']
                        fid.variables[q + '.min_location'][:] =\
                                        info['min_location']
                        fid.variables[q + '.min_time'][0] = info['min_time']

                    if info['max'] is not None:
                        fid.variables[q + '.extrema'][1] = info['max']
                        fid.variables[q + '.max_location'][:] =\
                                        info['max_location']
                        fid.variables[q + '.max_time'][0] = info['max_time']



            # Flush and close
            fid.sync()
            fid.close()

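# A hedged sketch of how Data_format_sww is typically driven (assumes a
# fully configured shallow water Domain instance; not a complete script):
def _example_store_sww(domain):
    writer = Data_format_sww(domain, mode='w')  # writes header on creation
    writer.store_connectivity()                 # static mesh + bed elevation
    writer.store_timestep(['stage', 'xmomentum', 'ymomentum'])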


# Class for handling checkpoint data
class Data_format_cpt(Data_format):
    """Interface to native NetCDF format (.cpt)
    """


    def __init__(self, domain, mode = 'w'):
        from Scientific.IO.NetCDF import NetCDFFile
        from Numeric import Int, Float

        self.precision = Float #Use full precision

        # FIXME: extension is 'sww' although this class handles .cpt checkpoints
        Data_format.__init__(self, domain, 'sww', mode)


        # NetCDF file definition
        fid = NetCDFFile(self.filename, mode)

        if mode == 'w':
            #Create new file
            fid.institution = 'Geoscience Australia'
            fid.description = 'Checkpoint data'
            #fid.smooth = domain.smooth
            fid.order = domain.default_order

            # dimension definitions
            fid.createDimension('number_of_volumes', self.number_of_volumes)
            fid.createDimension('number_of_vertices', 3)

            #Store info at all vertices (no smoothing)
            fid.createDimension('number_of_points', 3*self.number_of_volumes)
            fid.createDimension('number_of_timesteps', None) #extensible

            # variable definitions

            #Mesh
            fid.createVariable('x', self.precision, ('number_of_points',))
            fid.createVariable('y', self.precision, ('number_of_points',))


            fid.createVariable('volumes', Int, ('number_of_volumes',
                                                'number_of_vertices'))

            fid.createVariable('time', self.precision,
                               ('number_of_timesteps',))

            #Allocate space for all quantities
            for name in domain.quantities.keys():
                fid.createVariable(name, self.precision,
                                   ('number_of_timesteps',
                                    'number_of_points'))

        #Close
        fid.close()


    def store_checkpoint(self):
        """Write x,y coordinates of triangles, their connectivity and
        the bed elevation.
        """

        from Scientific.IO.NetCDF import NetCDFFile

        from Numeric import concatenate

        domain = self.domain

        #Get NetCDF
        fid = NetCDFFile(self.filename, 'a')  #Open existing file for append

        # Get the variables
        x = fid.variables['x']
        y = fid.variables['y']
        # FIXME: assumes a static 'elevation' variable exists in the file
        z = fid.variables['elevation']

        volumes = fid.variables['volumes']

        # Get X, Y and bed elevation Z
        Q = domain.quantities['elevation']
        X,Y,Z,V = Q.get_vertex_values(xy=True,
                      precision = self.precision)



        x[:] = X.astype(self.precision)
        y[:] = Y.astype(self.precision)
        z[:] = Z.astype(self.precision)

        volumes[:] = V

        #Close
        fid.close()


    def store_timestep(self, name):
        """Store time and named quantity to file
        """
        from Scientific.IO.NetCDF import NetCDFFile
        from time import sleep

        #Get NetCDF
        retries = 0
        file_open = False
        while not file_open and retries < 10:
            try:
                fid = NetCDFFile(self.filename, 'a') #Open existing file
            except IOError:
                #This could happen if someone was reading the file.
                #In that case, wait a while and try again
                msg = 'Warning (store_timestep): File %s could not be opened'\
                  %self.filename
                msg += ' - trying again'
                print msg
                retries += 1
                sleep(1)
            else:
                file_open = True

        if not file_open:
            msg = 'File %s could not be opened for append' %self.filename
            raise DataFileNotOpenError, msg


        domain = self.domain

        # Get the variables
        time = fid.variables['time']
        stage = fid.variables[name]  # variable for the named quantity
        i = len(time)

        #Store time
        time[i] = self.domain.time

        # Get quantity
        Q = domain.quantities[name]
        A,V = Q.get_vertex_values(xy=False,
                                  precision = self.precision)

        stage[i,:] = A.astype(self.precision)

        #Flush and close
        fid.sync()
        fid.close()

#### NED is national exposure database (name changed to NEXIS)

LAT_TITLE = 'LATITUDE'
LONG_TITLE = 'LONGITUDE'
X_TITLE = 'x'
Y_TITLE = 'y'
class Exposure_csv:
    def __init__(self, file_name, latitude_title=LAT_TITLE,
                 longitude_title=LONG_TITLE, is_x_y_locations=None,
                 x_title=X_TITLE, y_title=Y_TITLE,
                 refine_polygon=None, title_check_list=None):
        """
        This class is for handling the exposure csv file.
        It reads the file in and converts the lats and longs to a geospatial
        data object.
        Use the methods to read and write columns.

        The format of the csv files it reads is:
           The first row is a title row.
           Commas are the delimiters.
           Each column is a 'set' of data.

        Feel free to use/expand it to read other csv files.

        It is not for adding and deleting rows.

        Can geospatial handle string attributes? It's not made for them.
        Currently it can't load and save string attributes.

        So just use geospatial to hold the x, y and georef? Bad, since
        different attributes are in different structures.  Not so bad, the
        info to write if the .csv file is saved is in attribute_dic.

        The location info is in the geospatial attribute.
        """
        self._file_name = file_name
        self._geospatial = None #

        # self._attribute_dic is a dictionary.
        #The keys are the column titles.
        #The values are lists of column data

        # self._title_index_dic is a dictionary.
        #The keys are the column titles.
        #The values are the index positions of file columns.
        self._attribute_dic, self._title_index_dic = \
            csv2dict(self._file_name, title_check_list=title_check_list)
        try:
            #Have code here that handles caps or lower
            lats = self._attribute_dic[latitude_title]
            longs = self._attribute_dic[longitude_title]

        except KeyError:
            # maybe a warning..
            #Let's see if this works..
            if False != is_x_y_locations:
                is_x_y_locations = True
            pass
        else:
            self._geospatial = Geospatial_data(latitudes = lats,
                 longitudes = longs)

        if is_x_y_locations is True:
            if self._geospatial is not None:
                pass #fixme throw an error
            try:
                xs = self._attribute_dic[x_title]
                ys = self._attribute_dic[y_title]
                points = [[float(i),float(j)] for i,j in map(None,xs,ys)]
            except KeyError:
                # maybe a warning..
                msg = "Could not find location information."
                raise TitleValueError, msg
            else:
                self._geospatial = Geospatial_data(data_points=points)

        # create a list of points that are in the refining_polygon
        # described by a list of indexes representing the points

    def __cmp__(self, other):
        #print "self._attribute_dic",self._attribute_dic
        #print "other._attribute_dic",other._attribute_dic
        #print "self._title_index_dic", self._title_index_dic
        #print "other._title_index_dic", other._title_index_dic

        #check that a is an instance of this class
        if isinstance(self, type(other)):
            result = cmp(self._attribute_dic, other._attribute_dic)
            if result != 0:
                return result
            # The order of the columns is important. Therefore..
            result = cmp(self._title_index_dic, other._title_index_dic)
            if result != 0:
                return result
            for self_ls, other_ls in map(None, self._attribute_dic, \
                    other._attribute_dic):
                result = cmp(self._attribute_dic[self_ls],
                             other._attribute_dic[other_ls])
                if result != 0:
                    return result
            return 0
        else:
            return 1


    def get_column(self, column_name, use_refind_polygon=False):
        """
        Given a column name return a list of the column values.

        Note, the type of the values will be String!
        Do this to change a list of strings to a list of floats:
        time = [float(x) for x in time]

        Not implemented:
        if use_refind_polygon is True, only return values in the
        refined polygon
        """
        if not self._attribute_dic.has_key(column_name):
            msg = 'There is no column called %s!' %column_name
            raise TitleValueError, msg
        return self._attribute_dic[column_name]


    def get_value(self, value_column_name,
                  known_column_name,
                  known_values,
                  use_refind_polygon=False):
        """
        Do linear interpolation on the known_column, using the known_values,
        to return a value of the value_column_name.
        """
        pass


    def get_location(self, use_refind_polygon=False):
        """
        Return a geospatial object which describes the
        locations of the location file.

        Note, if there is no location info, this returns None.

        Not implemented:
        if use_refind_polygon is True, only return values in the
        refined polygon
        """
        return self._geospatial

    def set_column(self, column_name, column_values, overwrite=False):
        """
        Add a column to the 'end' (with the right most column being the end)
        of the csv file.

        Set overwrite to True if you want to overwrite a column.

        Note, in column_name white space is removed and case is not checked.
        Precondition:
        The column_name and column_values cannot have commas in them.
        """

        value_row_count = \
            len(self._attribute_dic[self._title_index_dic.keys()[0]])
        if len(column_values) != value_row_count:
            msg = 'The number of column values must equal the number of rows.'
            raise DataMissingValuesError, msg

        if self._attribute_dic.has_key(column_name):
            if not overwrite:
                msg = 'Column name %s already in use!' %column_name
                raise TitleValueError, msg
        else:
            # New title.  Add it to the title index.
            self._title_index_dic[column_name] = len(self._title_index_dic)
        self._attribute_dic[column_name] = column_values
        #print "self._title_index_dic[column_name]",self._title_index_dic

    def save(self, file_name=None):
        """
        Save the exposure csv file
        """
        if file_name is None:
            file_name = self._file_name

        fd = open(file_name, 'wb')
        writer = csv.writer(fd)

        #Write the title to a csv file
        line = [None]* len(self._title_index_dic)
        for title in self._title_index_dic.iterkeys():
            line[self._title_index_dic[title]] = title
        writer.writerow(line)

        # Write the values to a csv file
        value_row_count = \
            len(self._attribute_dic[self._title_index_dic.keys()[0]])
        for row_i in range(value_row_count):
            line = [None]* len(self._title_index_dic)
            for title in self._title_index_dic.iterkeys():
                line[self._title_index_dic[title]] = \
                     self._attribute_dic[title][row_i]
            writer.writerow(line)

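# Hedged usage sketch for Exposure_csv (the file and column names below are
# illustrative, not part of this module):
def _example_exposure_csv():
    exposure = Exposure_csv('exposure.csv')
    locations = exposure.get_location()         # Geospatial_data or None
    values = exposure.get_column('STRUCTURES')  # list of strings
    exposure.set_column('DEPTH', ['0.0'] * len(values), overwrite=True)
    exposure.save('exposure_out.csv')
    return locations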

def csv2array(file_name):
    """Convert CSV files of the form

    time, discharge, velocity
    0.0,  1.2,       0.0
    0.1,  3.2,       1.1
    ...

    to a dictionary of numeric arrays.


    See underlying function csv2dict for more details.

    """


    X, _ = csv2dict(file_name)

    Y = {}
    for key in X.keys():
        Y[key] = array([float(x) for x in X[key]])

    return Y

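# Hedged example for csv2array (file name and columns illustrative):
#   Y = csv2array('hydrograph.csv')
#   Y['time'], Y['discharge']  ->  Numeric arrays of floats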

def csv2dict(file_name, title_check_list=None):
    """
    Load the csv as a dictionary: title as key and column info as value.
    Also create a dictionary with title as key and column index as value,
    to keep track of the column order.

    Two dictionaries are returned.

    WARNING: Values are returned as strings.
    Do this to change a list of strings to a list of floats:
        time = [float(x) for x in time]
    """

    #
    attribute_dic = {}
    title_index_dic = {}
    titles_stripped = [] # list of titles
    reader = csv.reader(file(file_name))

    # Read in and manipulate the title info
    titles = reader.next()
    for i, title in enumerate(titles):
        titles_stripped.append(title.strip())
        title_index_dic[title.strip()] = i
    title_count = len(titles_stripped)
    #print "title_index_dic",title_index_dic
    if title_check_list is not None:
        for title_check in title_check_list:
            #msg = "Reading error. This column is not present: %s" % title_check
            #assert title_index_dic.has_key(title_check), msg
            if not title_index_dic.has_key(title_check):
                #reader.close()
                msg = "Reading error. This column is not present: %s" \
                      % title_check
                raise IOError, msg



    #create a dic of column values, indexed by column title
    for line in reader:
        if len(line) != title_count:
            raise IOError #FIXME make this nicer
        for i, value in enumerate(line):
            attribute_dic.setdefault(titles_stripped[i], []).append(value)

    return attribute_dic, title_index_dic


#Auxiliary
def write_obj(filename, x, y, z):
    """Store x,y,z vectors into filename (obj format)
       Vectors are assumed to have dimension (M,3) where
       M corresponds to the number of elements.
       Triangles are assumed to be disconnected.

       The three numbers in each vector correspond to three vertices,

       e.g. the x coordinate of vertex 1 of element i is in x[i,1]

    """
    #print 'Writing obj to %s' % filename

    import os.path

    root, ext = os.path.splitext(filename)
    if ext == '.obj':
        FN = filename
    else:
        FN = filename + '.obj'


    outfile = open(FN, 'wb')
    outfile.write("# Triangulation as an obj file\n")

    M, N = x.shape
    assert N == 3  #Assuming three vertices per element

    for i in range(M):
        for j in range(N):
            outfile.write("v %f %f %f\n" % (x[i,j], y[i,j], z[i,j]))

    for i in range(M):
        base = i*N
        outfile.write("f %d %d %d\n" % (base+1, base+2, base+3))

    outfile.close()

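# For a single triangle, write_obj produces a file of the form
# (coordinates illustrative):
#
#   # Triangulation as an obj file
#   v 0.000000 0.000000 0.000000
#   v 1.000000 0.000000 0.000000
#   v 0.000000 1.000000 0.000000
#   f 1 2 3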

#########################################################
#Conversion routines
########################################################

def sww2obj(basefilename, size):
    """Convert netcdf based data output to obj
    """
    from Scientific.IO.NetCDF import NetCDFFile

    from Numeric import Float, zeros

    #Get NetCDF
    FN = create_filename('.', basefilename, 'sww', size)
    print 'Reading from ', FN
    fid = NetCDFFile(FN, 'r')  #Open existing file for read


    # Get the variables
    x = fid.variables['x']
    y = fid.variables['y']
    z = fid.variables['elevation']
    time = fid.variables['time']
    stage = fid.variables['stage']

    M = size  #Number of lines
    xx = zeros((M,3), Float)
    yy = zeros((M,3), Float)
    zz = zeros((M,3), Float)

    for i in range(M):
        for j in range(3):
            xx[i,j] = x[i+j*M]
            yy[i,j] = y[i+j*M]
            zz[i,j] = z[i+j*M]

    #Write obj for bathymetry
    FN = create_filename('.', basefilename, 'obj', size)
    write_obj(FN, xx, yy, zz)


    #Now read all the data with variable information, combine with
    #x,y info and store as obj

    for k in range(len(time)):
        t = time[k]
        print 'Processing timestep %f' %t

        for i in range(M):
            for j in range(3):
                zz[i,j] = stage[k,i+j*M]


        #Write obj for variable data
        #FN = create_filename(basefilename, 'obj', size, time=t)
        FN = create_filename('.', basefilename[:5], 'obj', size, time=t)
        write_obj(FN, xx, yy, zz)


def dat2obj(basefilename):
    """Convert line based data output to obj
    FIXME: Obsolete?
    """

    import glob, os
    from anuga.config import data_dir


    #Get bathymetry and x,y's
    lines = open(data_dir+os.sep+basefilename+'_geometry.dat', 'r').readlines()

    from Numeric import zeros, Float

    M = len(lines)  #Number of lines
    x = zeros((M,3), Float)
    y = zeros((M,3), Float)
    z = zeros((M,3), Float)

    for i, line in enumerate(lines):
        tokens = line.split()
        values = map(float, tokens)

        for j in range(3):
            x[i,j] = values[j*3]
            y[i,j] = values[j*3+1]
            z[i,j] = values[j*3+2]


    #Write obj for bathymetry
    write_obj(data_dir+os.sep+basefilename+'_geometry', x, y, z)


    #Now read all the data files with variable information, combine with
    #x,y info
    #and store as obj

    files = glob.glob(data_dir+os.sep+basefilename+'*.dat')

    for filename in files:
        print 'Processing %s' % filename

        lines = open(data_dir+os.sep+filename, 'r').readlines()
        assert len(lines) == M
        root, ext = os.path.splitext(filename)

        #Get time from filename
        i0 = filename.find('_time=')
        if i0 == -1:
            #Skip bathymetry file
            continue

        i0 += 6  #Position where time starts
        i1 = filename.find('.dat')

        if i1 > i0:
            t = float(filename[i0:i1])
        else:
            raise DataTimeError, 'Could not extract time from filename %s' %filename



        for i, line in enumerate(lines):
            tokens = line.split()
            values = map(float, tokens)

            for j in range(3):
                z[i,j] = values[j]

        #Write obj for variable data
        write_obj(data_dir+os.sep+basefilename+'_time=%.4f' %t, x, y, z)


def filter_netcdf(filename1, filename2, first=0, last=None, step=1):
    """Read netcdf filename1, pick timesteps first:step:last and save to
    netcdf file filename2
    """
    from Scientific.IO.NetCDF import NetCDFFile

    #Get NetCDF
    infile = NetCDFFile(filename1, 'r')  #Open existing file for read
    outfile = NetCDFFile(filename2, 'w')  #Open new file


    #Copy dimensions
    for d in infile.dimensions:
        outfile.createDimension(d, infile.dimensions[d])

    for name in infile.variables:
        var = infile.variables[name]
        outfile.createVariable(name, var.typecode(), var.dimensions)


    #Copy the static variables
    for name in infile.variables:
        if name == 'time' or name == 'stage':
            pass
        else:
            #Copy
            outfile.variables[name][:] = infile.variables[name][:]

    #Copy selected timesteps
    time = infile.variables['time']
    stage = infile.variables['stage']

    newtime = outfile.variables['time']
    newstage = outfile.variables['stage']

    if last is None:
        last = len(time)

    selection = range(first, last, step)
    for i, j in enumerate(selection):
        print 'Copying timestep %d of %d (%f)' %(j, last-first, time[j])
        newtime[i] = time[j]
        newstage[i,:] = stage[j,:]

    #Close
    infile.close()
    outfile.close()

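# Hedged example (file names illustrative): keep every second timestep of
# the first 100 from run.sww:
#   filter_netcdf('run.sww', 'run_filtered.sww', first=0, last=100, step=2)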

#Get data objects
def get_dataobject(domain, mode='w'):
    """Return instance of class of given format using filename
    """

    cls = eval('Data_format_%s' %domain.format)
    return cls(domain, mode)




def dem2pts(basename_in, basename_out=None,
            easting_min=None, easting_max=None,
            northing_min=None, northing_max=None,
            use_cache=False, verbose=False):
    """Read Digital Elevation model from the following NetCDF format (.dem)

    Example:

    ncols         3121
    nrows         1800
    xllcorner     722000
    yllcorner     5893000
    cellsize      25
    NODATA_value  -9999
    138.3698 137.4194 136.5062 135.5558 ..........

    Convert to NetCDF pts format which is

    points:  (Nx2) Float array
    elevation: N Float array
    """



    kwargs = {'basename_out': basename_out,
              'easting_min': easting_min,
              'easting_max': easting_max,
              'northing_min': northing_min,
              'northing_max': northing_max,
              'verbose': verbose}

    if use_cache is True:
        from caching import cache
        result = cache(_dem2pts, basename_in, kwargs,
                       dependencies = [basename_in + '.dem'],
                       verbose = verbose)

    else:
        result = apply(_dem2pts, [basename_in], kwargs)

    return result

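# Hedged example (basename illustrative; bounding box derived from the DEM
# header shown in the docstring above):
#   dem2pts('topography',
#           easting_min=722000, easting_max=800025,
#           northing_min=5893000, northing_max=5938000,
#           use_cache=True, verbose=True)
#   # reads topography.dem, writes topography.pts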

def _dem2pts(basename_in, basename_out=None, verbose=False,
            easting_min=None, easting_max=None,
            northing_min=None, northing_max=None):
    """Read Digital Elevation model from the following NetCDF format (.dem)

    Internal function. See public function dem2pts for details.
    """

    # FIXME: Can this be written feasibly using write_pts?

    import os
    from Scientific.IO.NetCDF import NetCDFFile
    from Numeric import Float, zeros, reshape, sum

    root = basename_in

    # Get NetCDF
    infile = NetCDFFile(root + '.dem', 'r')  # Open existing netcdf file for read

    if verbose: print 'Reading DEM from %s' %(root + '.dem')

    ncols = infile.ncols[0]
    nrows = infile.nrows[0]
    xllcorner = infile.xllcorner[0]  # Easting of lower left corner
    yllcorner = infile.yllcorner[0]  # Northing of lower left corner
    cellsize = infile.cellsize[0]
    NODATA_value = infile.NODATA_value[0]
    dem_elevation = infile.variables['elevation']

    zone = infile.zone[0]
    false_easting = infile.false_easting[0]
    false_northing = infile.false_northing[0]

    # Text strings
    projection = infile.projection
    datum = infile.datum
    units = infile.units


    # Get output file
    if basename_out is None:
        ptsname = root + '.pts'
    else:
        ptsname = basename_out + '.pts'

    if verbose: print 'Store to NetCDF file %s' %ptsname
    # NetCDF file definition
    outfile = NetCDFFile(ptsname, 'w')

    # Create new file
    outfile.institution = 'Geoscience Australia'
    outfile.description = 'NetCDF pts format for compact and portable storage ' +\
                      'of spatial point data'
    # Assign default values
    if easting_min is None: easting_min = xllcorner
    if easting_max is None: easting_max = xllcorner + ncols*cellsize
    if northing_min is None: northing_min = yllcorner
    if northing_max is None: northing_max = yllcorner + nrows*cellsize

    # Compute offsets to update georeferencing
    easting_offset = xllcorner - easting_min
    northing_offset = yllcorner - northing_min

    # Georeferencing
    outfile.zone = zone
    outfile.xllcorner = easting_min # Easting of lower left corner
    outfile.yllcorner = northing_min # Northing of lower left corner
    outfile.false_easting = false_easting
    outfile.false_northing = false_northing

    outfile.projection = projection
    outfile.datum = datum
    outfile.units = units


    # Grid info (FIXME: probably not going to be used, but heck)
    outfile.ncols = ncols
    outfile.nrows = nrows

    dem_elevation_r = reshape(dem_elevation, (nrows, ncols))
    totalnopoints = nrows*ncols

    # Calculating number of NODATA_values for each row in clipped region
    # FIXME: use array operations to do faster
    nn = 0
    k = 0
    i1_0 = 0
    j1_0 = 0
    thisj = 0
    thisi = 0
    for i in range(nrows):
        y = (nrows-i-1)*cellsize + yllcorner
        for j in range(ncols):
            x = j*cellsize + xllcorner
            if easting_min <= x <= easting_max and \
               northing_min <= y <= northing_max:
                thisj = j
                thisi = i
                if dem_elevation_r[i,j] == NODATA_value: nn += 1

                if k == 0:
                    i1_0 = i
                    j1_0 = j
                k += 1

    index1 = j1_0
    index2 = thisj

    # Dimension definitions
    nrows_in_bounding_box = int(round((northing_max-northing_min)/cellsize))
    ncols_in_bounding_box = int(round((easting_max-easting_min)/cellsize))

    clippednopoints = (thisi+1-i1_0)*(thisj+1-j1_0)
    nopoints = clippednopoints-nn

    clipped_dem_elev = dem_elevation_r[i1_0:thisi+1,j1_0:thisj+1]

    if verbose:
        print 'There are %d values in the elevation' %totalnopoints
        print 'There are %d values in the clipped elevation' %clippednopoints
        print 'There are %d NODATA_values in the clipped elevation' %nn

    outfile.createDimension('number_of_points', nopoints)
    outfile.createDimension('number_of_dimensions', 2) #This is 2d data

    # Variable definitions
    outfile.createVariable('points', Float, ('number_of_points',
                                             'number_of_dimensions'))
    outfile.createVariable('elevation', Float, ('number_of_points',))

    # Get handles to the variables
    points = outfile.variables['points']
    elevation = outfile.variables['elevation']

    lenv = index2-index1+1
    # Store data
    global_index = 0
    # for i in range(nrows):
    for i in range(i1_0, thisi+1, 1):
        if verbose and i%((nrows+10)/10)==0:
            print 'Processing row %d of %d' %(i, nrows)

        lower_index = global_index

        v = dem_elevation_r[i,index1:index2+1]
        no_NODATA = sum(v == NODATA_value)
        if no_NODATA > 0:
            newcols = lenv - no_NODATA # ncols_in_bounding_box - no_NODATA
        else:
            newcols = lenv # ncols_in_bounding_box

        telev = zeros(newcols, Float)
        tpoints = zeros((newcols, 2), Float)

        local_index = 0

        y = (nrows-i-1)*cellsize + yllcorner
        #for j in range(ncols):
        for j in range(j1_0, index2+1, 1):

            x = j*cellsize + xllcorner
            if easting_min <= x <= easting_max and \
               northing_min <= y <= northing_max and \
               dem_elevation_r[i,j] != NODATA_value:
                tpoints[local_index, :] = [x-easting_min, y-northing_min]
                telev[local_index] = dem_elevation_r[i, j]
                global_index += 1
                local_index += 1

        upper_index = global_index

        if upper_index == lower_index + newcols:
            points[lower_index:upper_index, :] = tpoints
            elevation[lower_index:upper_index] = telev

    assert global_index == nopoints, 'index not equal to number of points'

    infile.close()
    outfile.close()



def _read_hecras_cross_sections(lines):
    """Return block of surface lines for each cross section
    Starts with SURFACE LINE,
    Ends with END CROSS-SECTION
    """

    points = []

    reading_surface = False
    for i, line in enumerate(lines):

        if len(line.strip()) == 0:    #Ignore blanks
            continue

        if lines[i].strip().startswith('SURFACE LINE'):
            reading_surface = True
            continue

        if lines[i].strip().startswith('END') and reading_surface:
            yield points
            reading_surface = False
            points = []

        if reading_surface:
            fields = line.strip().split(',')
            easting = float(fields[0])
            northing = float(fields[1])
            elevation = float(fields[2])
            points.append([easting, northing, elevation])




1525def hecras_cross_sections2pts(basename_in,
1526                              basename_out=None,
1527                              verbose=False):
1528    """Read HEC-RAS Elevation datal from the following ASCII format (.sdf)
1529
1530    Example:
1531
1532
1533# RAS export file created on Mon 15Aug2005 11:42
1534# by HEC-RAS Version 3.1.1
1535
1536BEGIN HEADER:
1537  UNITS: METRIC
1538  DTM TYPE: TIN
1539  DTM: v:\1\cit\perth_topo\river_tin
1540  STREAM LAYER: c:\local\hecras\21_02_03\up_canning_cent3d.shp
1541  CROSS-SECTION LAYER: c:\local\hecras\21_02_03\up_can_xs3d.shp
1542  MAP PROJECTION: UTM
1543  PROJECTION ZONE: 50
1544  DATUM: AGD66
1545  VERTICAL DATUM:
1546  NUMBER OF REACHES:  19
1547  NUMBER OF CROSS-SECTIONS:  14206
1548END HEADER:
1549
1550
1551Only the SURFACE LINE data of the following form will be utilised
1552
1553  CROSS-SECTION:
1554    STREAM ID:Southern-Wungong
1555    REACH ID:Southern-Wungong
1556    STATION:19040.*
1557    CUT LINE:
1558      405548.671603161 , 6438142.7594925
1559      405734.536092045 , 6438326.10404912
1560      405745.130459356 , 6438331.48627354
1561      405813.89633823 , 6438368.6272789
1562    SURFACE LINE:
1563     405548.67,   6438142.76,   35.37
1564     405552.24,   6438146.28,   35.41
1565     405554.78,   6438148.78,   35.44
1566     405555.80,   6438149.79,   35.44
1567     405559.37,   6438153.31,   35.45
1568     405560.88,   6438154.81,   35.44
1569     405562.93,   6438156.83,   35.42
1570     405566.50,   6438160.35,   35.38
1571     405566.99,   6438160.83,   35.37
1572     ...
1573   END CROSS-SECTION
1574
1575    Convert to NetCDF pts format which is
1576
1577    points:  (Nx2) Float array
1578    elevation: N Float array
1579    """
1580
1581    import os
1582    from Scientific.IO.NetCDF import NetCDFFile
1583    from Numeric import Float, zeros, reshape
1584
1585    root = basename_in
1586
1587    #Get ASCII file
1588    infile = open(root + '.sdf', 'r')  #Open SDF file for read
1589
1590    if verbose: print 'Reading DEM from %s' %(root + '.sdf')
1591
1592    lines = infile.readlines()
1593    infile.close()
1594
1595    if verbose: print 'Converting to pts format'
1596
1597    i = 0
1598    while lines[i].strip() == '' or lines[i].strip().startswith('#'):
1599        i += 1
1600
1601    assert lines[i].strip().upper() == 'BEGIN HEADER:'
1602    i += 1
1603
1604    assert lines[i].strip().upper().startswith('UNITS:')
1605    units = lines[i].strip().split()[1]
1606    i += 1
1607
1608    assert lines[i].strip().upper().startswith('DTM TYPE:')
1609    i += 1
1610
1611    assert lines[i].strip().upper().startswith('DTM:')
1612    i += 1
1613
1614    assert lines[i].strip().upper().startswith('STREAM')
1615    i += 1
1616
1617    assert lines[i].strip().upper().startswith('CROSS')
1618    i += 1
1619
1620    assert lines[i].strip().upper().startswith('MAP PROJECTION:')
1621    projection = lines[i].strip().split(':')[1]
1622    i += 1
1623
1624    assert lines[i].strip().upper().startswith('PROJECTION ZONE:')
1625    zone = int(lines[i].strip().split(':')[1])
1626    i += 1
1627
1628    assert lines[i].strip().upper().startswith('DATUM:')
1629    datum = lines[i].strip().split(':')[1]
1630    i += 1
1631
1632    assert lines[i].strip().upper().startswith('VERTICAL DATUM:')
1633    i += 1
1634
1635    assert lines[i].strip().upper().startswith('NUMBER OF REACHES:')
1636    i += 1
1637
1638    assert lines[i].strip().upper().startswith('NUMBER OF CROSS-SECTIONS:')
1639    number_of_cross_sections = int(lines[i].strip().split(':')[1])
1640    i += 1
1641
1642
1643    #Now read all points
1644    points = []
1645    elevation = []
1646    for j, entries in enumerate(_read_hecras_cross_sections(lines[i:])):
        for entry in entries:
            points.append(entry[:2])
            elevation.append(entry[2])
1650
1651
    msg = 'Actual number of cross sections == %d, reported in header as %d'\
          %(j+1, number_of_cross_sections)
    assert j+1 == number_of_cross_sections, msg
1655
1656    #Get output file
    if basename_out is None:
1658        ptsname = root + '.pts'
1659    else:
1660        ptsname = basename_out + '.pts'
1661
1662    geo_ref = Geo_reference(zone, 0, 0, datum, projection, units)
1663    geo = Geospatial_data(points, {"elevation":elevation},
1664                          verbose=verbose, geo_reference=geo_ref)
1665    geo.export_points_file(ptsname)
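
# The parsing loop above consumes the helper generator
# _read_hecras_cross_sections (defined elsewhere in this module).
# As a hedged sketch only - not the actual implementation - a generator
# yielding one list of [easting, northing, elevation] triplets per
# SURFACE LINE block could look as follows:
def _read_hecras_cross_sections_sketch(lines):
    """Hedged sketch: yield one list of [x, y, z] per cross section."""
    reading = False
    entries = []
    for line in lines:
        stripped = line.strip()
        if stripped.upper().startswith('SURFACE LINE'):
            # Start collecting coordinate triplets for this cross section
            reading = True
            entries = []
        elif reading and stripped.upper().startswith('END'):
            # 'END CROSS-SECTION' terminates the block - hand it back
            reading = False
            yield entries
        elif reading and stripped:
            # Coordinate line of the form 'easting, northing, elevation'
            entries.append([float(field) for field in stripped.split(',')])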
1666
1667def export_grid(basename_in, extra_name_out = None,
1668                quantities = None, # defaults to elevation
1669                timestep = None,
1670                reduction = None,
1671                cellsize = 10,
1672                NODATA_value = -9999,
1673                easting_min = None,
1674                easting_max = None,
1675                northing_min = None,
1676                northing_max = None,
1677                verbose = False,
1678                origin = None,
1679                datum = 'WGS84',
1680                format = 'ers'):
    """Wrapper for sww2dem - see sww2dem for what most of the
    parameters do.

    quantities is a list of quantities. Each quantity will be
    calculated for each sww file, so one output file is produced
    per input sww file and per quantity.

    Returns the basenames of the files produced, i.e. the directory
    plus the filename without the extension.
    """
1696   
1697    if quantities is None:
1698        quantities = ['elevation']
1699       
    if type(quantities) is str:
        quantities = [quantities]
1702
1703    # How many sww files are there?
1704    dir, base = os.path.split(basename_in)
1705    #print "basename_in",basename_in
1706    #print "base",base
1707
1708    iterate_over = get_all_swwfiles(dir,base,verbose)
1709   
1710    if dir == "":
1711        dir = "." # Unix compatibility
1712   
1713    files_out = []
1714    #print 'sww_file',iterate_over
1715    for sww_file in iterate_over:
1716        for quantity in quantities:
1717            if extra_name_out is None:
1718                basename_out = sww_file + '_' + quantity
1719            else:
1720                basename_out = sww_file + '_' + quantity + '_' \
1721                               + extra_name_out
1722#            print "basename_out", basename_out
1723       
1724            file_out = sww2dem(dir+sep+sww_file, dir+sep+basename_out,
1725                               quantity, 
1726                               timestep,
1727                               reduction,
1728                               cellsize,
1729                               NODATA_value,
1730                               easting_min,
1731                               easting_max,
1732                               northing_min,
1733                               northing_max,
1734                               verbose,
1735                               origin,
1736                               datum,
1737                               format)
1738            files_out.append(file_out)
1740    return files_out
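
# Example usage (hedged sketch - the directory and sww basename
# 'outputs/bedslope' are hypothetical):
def _example_export_grid():
    """Export maximal elevation and stage grids for all matching sww files."""
    return export_grid('outputs/bedslope',
                       quantities=['elevation', 'stage'],
                       cellsize=25,
                       format='asc',
                       verbose=True)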
1741
1742
1743def get_timeseries(production_dirs, output_dir, scenario_name, gauges_dir_name,
1744                   plot_quantity, generate_fig = False,
1745                   reportname = None, surface = False, time_min = None,
1746                   time_max = None, title_on = False, verbose = True,
1747                   nodes=None):
1748    """
1749    nodes - number of processes used.
1750
1751    warning - this function has no tests
1752    """
    if reportname is None:
        report = False
    else:
        report = True
1757       
1758    if nodes is None:
1759        is_parallel = False
1760    else:
1761        is_parallel = True
1762       
1763    # Generate figures
1764    swwfiles = {}
1765   
1766    if is_parallel is True:   
1767        for i in range(nodes):
1768            print 'Sending node %d of %d' %(i,nodes)
1769            swwfiles = {}
            if reportname is not None:
                reportname_node = reportname + '_%s' %(i)
            else:
                reportname_node = None
1772            for label_id in production_dirs.keys():
1773                if label_id == 'boundaries':
1774                    swwfile = best_boundary_sww
1775                else:
1776                    file_loc = output_dir + label_id + sep
1777                    sww_extra = '_P%s_%s' %(i,nodes)
1778                    swwfile = file_loc + scenario_name + sww_extra + '.sww'
1779                    print 'swwfile',swwfile
                swwfiles[swwfile] = label_id

            texname, elev_output = sww2timeseries(swwfiles,
                                          gauges_dir_name,
                                          production_dirs,
                                          report = report,
                                          reportname = reportname_node,
                                          plot_quantity = plot_quantity,
                                          generate_fig = generate_fig,
                                          surface = surface,
                                          time_min = time_min,
                                          time_max = time_max,
                                          title_on = title_on,
                                          verbose = verbose)
1794    else:   
1795        for label_id in production_dirs.keys():       
1796            if label_id == 'boundaries':
1797                print 'boundaries'
1798                file_loc = project.boundaries_in_dir
1799                swwfile = project.boundaries_dir_name3 + '.sww'
1800                #  swwfile = boundary_dir_filename
1801            else:
1802                file_loc = output_dir + label_id + sep
1803                swwfile = file_loc + scenario_name + '.sww'
1804            swwfiles[swwfile] = label_id
1805       
1806        texname, elev_output = sww2timeseries(swwfiles,
1807                                              gauges_dir_name,
1808                                              production_dirs,
1809                                              report = report,
1810                                              reportname = reportname,
1811                                              plot_quantity = plot_quantity,
1812                                              generate_fig = generate_fig,
1813                                              surface = surface,
1814                                              time_min = time_min,
1815                                              time_max = time_max,
1816                                              title_on = title_on,
1817                                              verbose = verbose)
1818                                         
1819
1820   
1821def sww2dem(basename_in, basename_out = None,
1822            quantity = None, # defaults to elevation
1823            timestep = None,
1824            reduction = None,
1825            cellsize = 10,
1826            NODATA_value = -9999,
1827            easting_min = None,
1828            easting_max = None,
1829            northing_min = None,
1830            northing_max = None,
1831            verbose = False,
1832            origin = None,
1833            datum = 'WGS84',
1834            format = 'ers'):
1835
    """Read SWW file and convert to Digital Elevation Model format
    (.asc or .ers)
1838
1839    Example (ASC):
1840
1841    ncols         3121
1842    nrows         1800
1843    xllcorner     722000
1844    yllcorner     5893000
1845    cellsize      25
1846    NODATA_value  -9999
1847    138.3698 137.4194 136.5062 135.5558 ..........
1848
    Also write accompanying file with same basename_out but extension .prj
    used to fix the UTM zone, datum, false northings and eastings.

    The prj format is assumed to be as follows
1853
1854    Projection    UTM
1855    Zone          56
1856    Datum         WGS84
1857    Zunits        NO
1858    Units         METERS
1859    Spheroid      WGS84
1860    Xshift        0.0000000000
1861    Yshift        10000000.0000000000
1862    Parameters
1863
1864
1865    The parameter quantity must be the name of an existing quantity or
1866    an expression involving existing quantities. The default is
1867    'elevation'. Quantity is not a list of quantities.
1868
1869    if timestep (an index) is given, output quantity at that timestep
1870
1871    if reduction is given use that to reduce quantity over all timesteps.
1872
1873    datum
1874
1875    format can be either 'asc' or 'ers'
1876    """
1877
1878    import sys
1879    from Numeric import array, Float, concatenate, NewAxis, zeros, reshape, \
1880         sometrue
1881    from Numeric import array2string
1882
1883    from anuga.utilities.polygon import inside_polygon, outside_polygon, \
1884         separate_points_by_polygon
1885    from anuga.abstract_2d_finite_volumes.util import \
1886         apply_expression_to_dictionary
1887
1888    msg = 'Format must be either asc or ers'
1889    assert format.lower() in ['asc', 'ers'], msg
1890
1891
1892    false_easting = 500000
1893    false_northing = 10000000
1894
1895    if quantity is None:
1896        quantity = 'elevation'
1897       
1898    if reduction is None:
1899        reduction = max
1900
1901    if basename_out is None:
1902        basename_out = basename_in + '_%s' %quantity
1903
1904    if quantity_formula.has_key(quantity):
1905        quantity = quantity_formula[quantity]
1906       
1907    swwfile = basename_in + '.sww'
1908    demfile = basename_out + '.' + format
    # Note the use of a .ers extension is optional (write_ermapper_grid will
    # deal with either option)
1911   
1912    #if verbose: bye= nsuadsfd[0] # uncomment to check catching verbose errors
1913   
1914    # Read sww file
1915    if verbose: 
1916        print 'Reading from %s' %swwfile
        print 'Output basename is %s' %basename_out
1918   
1919    from Scientific.IO.NetCDF import NetCDFFile
1920    fid = NetCDFFile(swwfile)
1921
1922    #Get extent and reference
1923    x = fid.variables['x'][:]
1924    y = fid.variables['y'][:]
1925    volumes = fid.variables['volumes'][:]
1926    if timestep is not None:
1927        times = fid.variables['time'][timestep]
1928    else:
1929        times = fid.variables['time'][:]
1930
1931    number_of_timesteps = fid.dimensions['number_of_timesteps']
1932    number_of_points = fid.dimensions['number_of_points']
1933   
1934    if origin is None:
1935
1936        # Get geo_reference
1937        # sww files don't have to have a geo_ref
1938        try:
1939            geo_reference = Geo_reference(NetCDFObject=fid)
1940        except AttributeError, e:
1941            geo_reference = Geo_reference() # Default georef object
1942
1943        xllcorner = geo_reference.get_xllcorner()
1944        yllcorner = geo_reference.get_yllcorner()
1945        zone = geo_reference.get_zone()
1946    else:
1947        zone = origin[0]
1948        xllcorner = origin[1]
1949        yllcorner = origin[2]
1950
1951
1952
1953    # FIXME: Refactor using code from Interpolation_function.statistics
1954    # (in interpolate.py)
1955    # Something like print swwstats(swwname)
1956    if verbose:
1957        print '------------------------------------------------'
1958        print 'Statistics of SWW file:'
1959        print '  Name: %s' %swwfile
1960        print '  Reference:'
1961        print '    Lower left corner: [%f, %f]'\
1962              %(xllcorner, yllcorner)
1963        if timestep is not None:
1964            print '    Time: %f' %(times)
1965        else:
1966            print '    Start time: %f' %fid.starttime[0]
1967        print '  Extent:'
1968        print '    x [m] in [%f, %f], len(x) == %d'\
1969              %(min(x.flat), max(x.flat), len(x.flat))
1970        print '    y [m] in [%f, %f], len(y) == %d'\
1971              %(min(y.flat), max(y.flat), len(y.flat))
1972        if timestep is not None:
1973            print '    t [s] = %f, len(t) == %d' %(times, 1)
1974        else:
1975            print '    t [s] in [%f, %f], len(t) == %d'\
1976              %(min(times), max(times), len(times))
1977        print '  Quantities [SI units]:'
1978        # Comment out for reduced memory consumption
1979        for name in ['stage', 'xmomentum', 'ymomentum']:
1980            q = fid.variables[name][:].flat
1981            if timestep is not None:
1982                q = q[timestep*len(x):(timestep+1)*len(x)]
1983            if verbose: print '    %s in [%f, %f]' %(name, min(q), max(q))
1984        for name in ['elevation']:
1985            q = fid.variables[name][:].flat
1986            if verbose: print '    %s in [%f, %f]' %(name, min(q), max(q))
1987           
1988    # Get quantity and reduce if applicable
1989    if verbose: print 'Processing quantity %s' %quantity
1990
1991    # Turn NetCDF objects into Numeric arrays
1992    try:
        q = fid.variables[quantity][:]
    except KeyError:
1997        quantity_dict = {}
1998        for name in fid.variables.keys():
1999            quantity_dict[name] = fid.variables[name][:] 
2000        #Convert quantity expression to quantities found in sww file   
2001        q = apply_expression_to_dictionary(quantity, quantity_dict)
2002    #print "q.shape",q.shape
2003    if len(q.shape) == 2:
2004        #q has a time component and needs to be reduced along
2005        #the temporal dimension
2006        if verbose: print 'Reducing quantity %s' %quantity
2007        q_reduced = zeros( number_of_points, Float )
2008       
2009        if timestep is not None:
2010            for k in range(number_of_points):
2011                q_reduced[k] = q[timestep,k] 
2012        else:
2013            for k in range(number_of_points):
2014                q_reduced[k] = reduction( q[:,k] )
2015
2016        q = q_reduced
2017
2018    #Post condition: Now q has dimension: number_of_points
2019    assert len(q.shape) == 1
2020    assert q.shape[0] == number_of_points
2021
2022    if verbose:
2023        print 'Processed values for %s are in [%f, %f]' %(quantity, min(q), max(q))
2024
2025
2026    #Create grid and update xll/yll corner and x,y
2027
2028    #Relative extent
2029    if easting_min is None:
2030        xmin = min(x)
2031    else:
2032        xmin = easting_min - xllcorner
2033
2034    if easting_max is None:
2035        xmax = max(x)
2036    else:
2037        xmax = easting_max - xllcorner
2038
2039    if northing_min is None:
2040        ymin = min(y)
2041    else:
2042        ymin = northing_min - yllcorner
2043
2044    if northing_max is None:
2045        ymax = max(y)
2046    else:
2047        ymax = northing_max - yllcorner
2048
2049
2050
2051    if verbose: print 'Creating grid'
2052    ncols = int((xmax-xmin)/cellsize)+1
2053    nrows = int((ymax-ymin)/cellsize)+1
2054
2055
2056    #New absolute reference and coordinates
2057    newxllcorner = xmin+xllcorner
2058    newyllcorner = ymin+yllcorner
2059
2060    x = x+xllcorner-newxllcorner
2061    y = y+yllcorner-newyllcorner
2062   
2063    vertex_points = concatenate ((x[:, NewAxis] ,y[:, NewAxis]), axis = 1)
2064    assert len(vertex_points.shape) == 2
2065
2066    grid_points = zeros ( (ncols*nrows, 2), Float )
2067
2068
    for i in xrange(nrows):
        if format.lower() == 'asc':
            yg = i*cellsize
        else:
            # this will flip the order of the y values for ers
            yg = (nrows-i)*cellsize
2075
2076        for j in xrange(ncols):
2077            xg = j*cellsize
2078            k = i*ncols + j
2079
2080            grid_points[k,0] = xg
2081            grid_points[k,1] = yg
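
    # Example of the ordering: with ncols == 3 the index k = i*ncols + j
    # runs 0,1,2 along the first row (i == 0), 3,4,5 along the second,
    # etc., i.e. grid_points is stored row by row with x varying fastest.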
2082
2083    #Interpolate
2084    from anuga.fit_interpolate.interpolate import Interpolate
2085
2086    # Remove loners from vertex_points, volumes here
2087    vertex_points, volumes = remove_lone_verts(vertex_points, volumes)
2088    #export_mesh_file('monkey.tsh',{'vertices':vertex_points, 'triangles':volumes})
2089    #import sys; sys.exit()
2090    interp = Interpolate(vertex_points, volumes, verbose = verbose)
2091
2092    #Interpolate using quantity values
2093    if verbose: print 'Interpolating'
2094    grid_values = interp.interpolate(q, grid_points).flat
2095
2096
2097    if verbose:
2098        print 'Interpolated values are in [%f, %f]' %(min(grid_values),
2099                                                      max(grid_values))
2100
2101    #Assign NODATA_value to all points outside bounding polygon (from interpolation mesh)
2102    P = interp.mesh.get_boundary_polygon()
2103    outside_indices = outside_polygon(grid_points, P, closed=True)
2104
2105    for i in outside_indices:
2106        grid_values[i] = NODATA_value
2107
2108
2109
2110
2111    if format.lower() == 'ers':
2112        # setup ERS header information
2113        grid_values = reshape(grid_values,(nrows, ncols))
2114        header = {}
2115        header['datum'] = '"' + datum + '"'
2116        # FIXME The use of hardwired UTM and zone number needs to be made optional
2117        # FIXME Also need an automatic test for coordinate type (i.e. EN or LL)
2118        header['projection'] = '"UTM-' + str(zone) + '"'
2119        header['coordinatetype'] = 'EN'
2120        if header['coordinatetype'] == 'LL':
2121            header['longitude'] = str(newxllcorner)
2122            header['latitude'] = str(newyllcorner)
2123        elif header['coordinatetype'] == 'EN':
2124            header['eastings'] = str(newxllcorner)
2125            header['northings'] = str(newyllcorner)
2126        header['nullcellvalue'] = str(NODATA_value)
2127        header['xdimension'] = str(cellsize)
2128        header['ydimension'] = str(cellsize)
2129        header['value'] = '"' + quantity + '"'
2130        #header['celltype'] = 'IEEE8ByteReal'  #FIXME: Breaks unit test
2131
2132
2133        #Write
2134        if verbose: print 'Writing %s' %demfile
2135        import ermapper_grids
2136        ermapper_grids.write_ermapper_grid(demfile, grid_values, header)
2137
        fid.close()
        return basename_out
    else:
2140        #Write to Ascii format
2141
2142        #Write prj file
2143        prjfile = basename_out + '.prj'
2144
2145        if verbose: print 'Writing %s' %prjfile
2146        prjid = open(prjfile, 'w')
2147        prjid.write('Projection    %s\n' %'UTM')
2148        prjid.write('Zone          %d\n' %zone)
2149        prjid.write('Datum         %s\n' %datum)
2150        prjid.write('Zunits        NO\n')
2151        prjid.write('Units         METERS\n')
2152        prjid.write('Spheroid      %s\n' %datum)
2153        prjid.write('Xshift        %d\n' %false_easting)
2154        prjid.write('Yshift        %d\n' %false_northing)
2155        prjid.write('Parameters\n')
2156        prjid.close()
2157
2158
2159
2160        if verbose: print 'Writing %s' %demfile
2161
2162        ascid = open(demfile, 'w')
2163
2164        ascid.write('ncols         %d\n' %ncols)
2165        ascid.write('nrows         %d\n' %nrows)
2166        ascid.write('xllcorner     %d\n' %newxllcorner)
2167        ascid.write('yllcorner     %d\n' %newyllcorner)
2168        ascid.write('cellsize      %f\n' %cellsize)
2169        ascid.write('NODATA_value  %d\n' %NODATA_value)
2170
2171
2172        #Get bounding polygon from mesh
2173        #P = interp.mesh.get_boundary_polygon()
2174        #inside_indices = inside_polygon(grid_points, P)
2175
2176        for i in range(nrows):
2177            if verbose and i%((nrows+10)/10)==0:
2178                print 'Doing row %d of %d' %(i, nrows)
2179
2180            base_index = (nrows-i-1)*ncols
2181
            row = grid_values[base_index:base_index+ncols]
            s = array2string(row, max_line_width=sys.maxint)
2184            ascid.write(s[1:-1] + '\n')
2185
2186
2187            #print
2188            #for j in range(ncols):
2189            #    index = base_index+j#
2190            #    print grid_values[index],
2191            #    ascid.write('%f ' %grid_values[index])
2192            #ascid.write('\n')
2193
2194        #Close
2195        ascid.close()
2196        fid.close()
2197        return basename_out
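
# Example usage (hedged sketch - 'runup' is a hypothetical sww basename):
def _example_sww2dem():
    """Grid the maximal water depth as an ArcView ASCII file."""
    return sww2dem('runup',
                   basename_out='runup_depth',
                   quantity='stage-elevation',
                   reduction=max,
                   cellsize=10,
                   format='asc',
                   verbose=True)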
2198
2199
2200#Backwards compatibility
2201def sww2asc(basename_in, basename_out = None,
2202            quantity = None,
2203            timestep = None,
2204            reduction = None,
2205            cellsize = 10,
2206            verbose = False,
2207            origin = None):
2208    print 'sww2asc will soon be obsoleted - please use sww2dem'
2209    sww2dem(basename_in,
2210            basename_out = basename_out,
2211            quantity = quantity,
2212            timestep = timestep,
2213            reduction = reduction,
2214            cellsize = cellsize,
2215            verbose = verbose,
            origin = origin,
            datum = 'WGS84',
            format = 'asc')
2219
2220def sww2ers(basename_in, basename_out = None,
2221            quantity = None,
2222            timestep = None,
2223            reduction = None,
2224            cellsize = 10,
2225            verbose = False,
2226            origin = None,
2227            datum = 'WGS84'):
2228    print 'sww2ers will soon be obsoleted - please use sww2dem'
2229    sww2dem(basename_in,
2230            basename_out = basename_out,
2231            quantity = quantity,
2232            timestep = timestep,
2233            reduction = reduction,
2234            cellsize = cellsize,
2235            verbose = verbose,
2236            origin = origin,
2237            datum = datum,
2238            format = 'ers')
2239################################# END COMPATIBILITY ##############
2240
2241
2242
2243def sww2pts(basename_in, basename_out=None,
2244            data_points=None,
2245            quantity=None,
2246            timestep=None,
2247            reduction=None,
2248            NODATA_value=-9999,
2249            verbose=False,
2250            origin=None):
2251            #datum = 'WGS84')
2252
2253
2254    """Read SWW file and convert to interpolated values at selected points
2255
    The parameter 'quantity' must be the name of an existing quantity or
2257    an expression involving existing quantities. The default is
2258    'elevation'.
2259
2260    if timestep (an index) is given, output quantity at that timestep
2261
2262    if reduction is given use that to reduce quantity over all timesteps.
2263
    data_points (Nx2 array) gives locations of points where the quantity is to be computed.
2265   
2266    """
2267
2268    import sys
2269    from Numeric import array, Float, concatenate, NewAxis, zeros, reshape, sometrue
2270    from Numeric import array2string
2271
2272    from anuga.utilities.polygon import inside_polygon, outside_polygon, separate_points_by_polygon
2273    from anuga.abstract_2d_finite_volumes.util import apply_expression_to_dictionary
2274
2275    from anuga.geospatial_data.geospatial_data import Geospatial_data
2276
2277    if quantity is None:
2278        quantity = 'elevation'
2279
2280    if reduction is None:
2281        reduction = max
2282
2283    if basename_out is None:
2284        basename_out = basename_in + '_%s' %quantity
2285
2286    swwfile = basename_in + '.sww'
2287    ptsfile = basename_out + '.pts'
2288
2289    # Read sww file
2290    if verbose: print 'Reading from %s' %swwfile
2291    from Scientific.IO.NetCDF import NetCDFFile
2292    fid = NetCDFFile(swwfile)
2293
2294    # Get extent and reference
2295    x = fid.variables['x'][:]
2296    y = fid.variables['y'][:]
2297    volumes = fid.variables['volumes'][:]
2298
2299    number_of_timesteps = fid.dimensions['number_of_timesteps']
2300    number_of_points = fid.dimensions['number_of_points']
2301    if origin is None:
2302
2303        # Get geo_reference
2304        # sww files don't have to have a geo_ref
2305        try:
2306            geo_reference = Geo_reference(NetCDFObject=fid)
2307        except AttributeError, e:
2308            geo_reference = Geo_reference() #Default georef object
2309
2310        xllcorner = geo_reference.get_xllcorner()
2311        yllcorner = geo_reference.get_yllcorner()
2312        zone = geo_reference.get_zone()
2313    else:
2314        zone = origin[0]
2315        xllcorner = origin[1]
2316        yllcorner = origin[2]
2317
2318
2319
2320    # FIXME: Refactor using code from file_function.statistics
2321    # Something like print swwstats(swwname)
2322    if verbose:
2323        x = fid.variables['x'][:]
2324        y = fid.variables['y'][:]
2325        times = fid.variables['time'][:]
2326        print '------------------------------------------------'
2327        print 'Statistics of SWW file:'
2328        print '  Name: %s' %swwfile
2329        print '  Reference:'
2330        print '    Lower left corner: [%f, %f]'\
2331              %(xllcorner, yllcorner)
2332        print '    Start time: %f' %fid.starttime[0]
2333        print '  Extent:'
2334        print '    x [m] in [%f, %f], len(x) == %d'\
2335              %(min(x.flat), max(x.flat), len(x.flat))
2336        print '    y [m] in [%f, %f], len(y) == %d'\
2337              %(min(y.flat), max(y.flat), len(y.flat))
2338        print '    t [s] in [%f, %f], len(t) == %d'\
2339              %(min(times), max(times), len(times))
2340        print '  Quantities [SI units]:'
2341        for name in ['stage', 'xmomentum', 'ymomentum', 'elevation']:
2342            q = fid.variables[name][:].flat
2343            print '    %s in [%f, %f]' %(name, min(q), max(q))
2344
2345
2346
2347    # Get quantity and reduce if applicable
2348    if verbose: print 'Processing quantity %s' %quantity
2349
2350    # Turn NetCDF objects into Numeric arrays
2351    quantity_dict = {}
2352    for name in fid.variables.keys():
2353        quantity_dict[name] = fid.variables[name][:]
2354
2355
2356
2357    # Convert quantity expression to quantities found in sww file   
2358    q = apply_expression_to_dictionary(quantity, quantity_dict)
2359
2360
2361
2362    if len(q.shape) == 2:
2363        # q has a time component and needs to be reduced along
2364        # the temporal dimension
2365        if verbose: print 'Reducing quantity %s' %quantity
2366        q_reduced = zeros( number_of_points, Float )
2367
2368        for k in range(number_of_points):
2369            q_reduced[k] = reduction( q[:,k] )
2370
2371        q = q_reduced
2372
2373    # Post condition: Now q has dimension: number_of_points
2374    assert len(q.shape) == 1
2375    assert q.shape[0] == number_of_points
2376
2377
2378    if verbose:
2379        print 'Processed values for %s are in [%f, %f]' %(quantity, min(q), max(q))
2380
2381
2382    # Create grid and update xll/yll corner and x,y
2383    vertex_points = concatenate ((x[:, NewAxis] ,y[:, NewAxis]), axis = 1)
2384    assert len(vertex_points.shape) == 2
2385
2386    # Interpolate
2387    from anuga.fit_interpolate.interpolate import Interpolate
2388    interp = Interpolate(vertex_points, volumes, verbose = verbose)
2389
2390    # Interpolate using quantity values
2391    if verbose: print 'Interpolating'
2392    interpolated_values = interp.interpolate(q, data_points).flat
2393
2394
2395    if verbose:
2396        print 'Interpolated values are in [%f, %f]' %(min(interpolated_values),
2397                                                      max(interpolated_values))
2398
2399    # Assign NODATA_value to all points outside bounding polygon (from interpolation mesh)
2400    P = interp.mesh.get_boundary_polygon()
2401    outside_indices = outside_polygon(data_points, P, closed=True)
2402
2403    for i in outside_indices:
2404        interpolated_values[i] = NODATA_value
2405
2406
2407    # Store results   
2408    G = Geospatial_data(data_points=data_points,
2409                        attributes=interpolated_values)
2410
2411    G.export_points_file(ptsfile, absolute = True)
2412
2413    fid.close()
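
# Example usage (hedged sketch - the sww basename and gauge coordinates
# are hypothetical; data_points are absolute UTM metres):
def _example_sww2pts():
    """Sample maximal stage at two gauge locations."""
    gauges = [[382100.0, 6380200.0],
              [382500.0, 6380600.0]]
    sww2pts('runup',
            basename_out='runup_gauges',
            data_points=gauges,
            quantity='stage',
            reduction=max,
            verbose=True)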
2414
2415
2416def convert_dem_from_ascii2netcdf(basename_in, basename_out = None,
2417                                  use_cache = False,
2418                                  verbose = False):
    """Read Digital Elevation Model from the following ASCII format (.asc)
2420
2421    Example:
2422
2423    ncols         3121
2424    nrows         1800
2425    xllcorner     722000
2426    yllcorner     5893000
2427    cellsize      25
2428    NODATA_value  -9999
2429    138.3698 137.4194 136.5062 135.5558 ..........
2430
2431    Convert basename_in + '.asc' to NetCDF format (.dem)
2432    mimicking the ASCII format closely.
2433
2434
2435    An accompanying file with same basename_in but extension .prj must exist
2436    and is used to fix the UTM zone, datum, false northings and eastings.
2437
2438    The prj format is assumed to be as
2439
2440    Projection    UTM
2441    Zone          56
2442    Datum         WGS84
2443    Zunits        NO
2444    Units         METERS
2445    Spheroid      WGS84
2446    Xshift        0.0000000000
2447    Yshift        10000000.0000000000
2448    Parameters
2449    """
2450
2451
2452
2453    kwargs = {'basename_out': basename_out, 'verbose': verbose}
2454
2455    if use_cache is True:
2456        from caching import cache
2457        result = cache(_convert_dem_from_ascii2netcdf, basename_in, kwargs,
2458                       dependencies = [basename_in + '.asc',
2459                                       basename_in + '.prj'],
2460                       verbose = verbose)
2461
2462    else:
2463        result = apply(_convert_dem_from_ascii2netcdf, [basename_in], kwargs)
2464
2465    return result
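
# Example usage (hedged sketch - 'topography' is a hypothetical basename
# for topography.asc and topography.prj):
def _example_dem_conversion():
    """Convert an ArcView ASCII DEM to NetCDF, caching the result."""
    return convert_dem_from_ascii2netcdf('topography',
                                         use_cache=True,
                                         verbose=True)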
2466
2467
2468
2469
2470
2471
2472def _convert_dem_from_ascii2netcdf(basename_in, basename_out = None,
2473                                  verbose = False):
    """Read Digital Elevation Model from the following ASCII format (.asc)
2475
2476    Internal function. See public function convert_dem_from_ascii2netcdf for details.
2477    """
2478
2479    import os
2480    from Scientific.IO.NetCDF import NetCDFFile
2481    from Numeric import Float, array
2482
2483    #root, ext = os.path.splitext(basename_in)
2484    root = basename_in
2485
2486    ###########################################
2487    # Read Meta data
    if verbose: print 'Reading METADATA from %s' %(root + '.prj')
2489    metadatafile = open(root + '.prj')
2490    metalines = metadatafile.readlines()
2491    metadatafile.close()
2492
2493    L = metalines[0].strip().split()
2494    assert L[0].strip().lower() == 'projection'
2495    projection = L[1].strip()                   #TEXT
2496
2497    L = metalines[1].strip().split()
2498    assert L[0].strip().lower() == 'zone'
2499    zone = int(L[1].strip())
2500
2501    L = metalines[2].strip().split()
2502    assert L[0].strip().lower() == 'datum'
2503    datum = L[1].strip()                        #TEXT
2504
2505    L = metalines[3].strip().split()
2506    assert L[0].strip().lower() == 'zunits'     #IGNORE
2507    zunits = L[1].strip()                       #TEXT
2508
2509    L = metalines[4].strip().split()
2510    assert L[0].strip().lower() == 'units'
2511    units = L[1].strip()                        #TEXT
2512
2513    L = metalines[5].strip().split()
2514    assert L[0].strip().lower() == 'spheroid'   #IGNORE
2515    spheroid = L[1].strip()                     #TEXT
2516
2517    L = metalines[6].strip().split()
2518    assert L[0].strip().lower() == 'xshift'
2519    false_easting = float(L[1].strip())
2520
2521    L = metalines[7].strip().split()
2522    assert L[0].strip().lower() == 'yshift'
2523    false_northing = float(L[1].strip())
2524
2525    #print false_easting, false_northing, zone, datum
2526
2527
2528    ###########################################
2529    #Read DEM data
2530
2531    datafile = open(basename_in + '.asc')
2532
2533    if verbose: print 'Reading DEM from %s' %(basename_in + '.asc')
2534    lines = datafile.readlines()
2535    datafile.close()
2536
2537    if verbose: print 'Got', len(lines), ' lines'
2538
2539    ncols = int(lines[0].split()[1].strip())
2540    nrows = int(lines[1].split()[1].strip())
2541
    # Do cellsize (line 4) before lines 2 and 3
2543    cellsize = float(lines[4].split()[1].strip())   
2544
2545    # Checks suggested by Joaquim Luis
2546    # Our internal representation of xllcorner
2547    # and yllcorner is non-standard.
2548    xref = lines[2].split()
2549    if xref[0].strip() == 'xllcorner':
2550        xllcorner = float(xref[1].strip()) # + 0.5*cellsize # Correct offset
2551    elif xref[0].strip() == 'xllcenter':
2552        xllcorner = float(xref[1].strip())
2553    else:
2554        msg = 'Unknown keyword: %s' %xref[0].strip()
2555        raise Exception, msg
2556
2557    yref = lines[3].split()
2558    if yref[0].strip() == 'yllcorner':
2559        yllcorner = float(yref[1].strip()) # + 0.5*cellsize # Correct offset
2560    elif yref[0].strip() == 'yllcenter':
2561        yllcorner = float(yref[1].strip())
2562    else:
2563        msg = 'Unknown keyword: %s' %yref[0].strip()
2564        raise Exception, msg
2565       
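    # Note on the standard ESRI convention (assumed here):
    # xllcenter = xllcorner + 0.5*cellsize, so with cellsize 25 a corner
    # at 722000 corresponds to a cell centre at 722012.5.  The
    # commented-out '+ 0.5*cellsize' terms above would apply that offset.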
2566
2567    NODATA_value = int(lines[5].split()[1].strip())
2568
2569    assert len(lines) == nrows + 6
2570
2571
2572    ##########################################
2573
2574
    if basename_out is None:
2576        netcdfname = root + '.dem'
2577    else:
2578        netcdfname = basename_out + '.dem'
2579
2580    if verbose: print 'Store to NetCDF file %s' %netcdfname
2581    # NetCDF file definition
2582    fid = NetCDFFile(netcdfname, 'w')
2583
2584    #Create new file
2585    fid.institution = 'Geoscience Australia'
2586    fid.description = 'NetCDF DEM format for compact and portable storage ' +\
2587                      'of spatial point data'
2588
2589    fid.ncols = ncols
2590    fid.nrows = nrows
2591    fid.xllcorner = xllcorner
2592    fid.yllcorner = yllcorner
2593    fid.cellsize = cellsize
2594    fid.NODATA_value = NODATA_value
2595
2596    fid.zone = zone
2597    fid.false_easting = false_easting
2598    fid.false_northing = false_northing
2599    fid.projection = projection
2600    fid.datum = datum
2601    fid.units = units
2602
2603
2604    # dimension definitions
2605    fid.createDimension('number_of_rows', nrows)
2606    fid.createDimension('number_of_columns', ncols)
2607
2608    # variable definitions
2609    fid.createVariable('elevation', Float, ('number_of_rows',
2610                                            'number_of_columns'))
2611
2612    # Get handles to the variables
2613    elevation = fid.variables['elevation']
2614
2615    #Store data
2616    n = len(lines[6:])
2617    for i, line in enumerate(lines[6:]):
2618        fields = line.split()
2619        if verbose and i%((n+10)/10)==0:
2620            print 'Processing row %d of %d' %(i, nrows)
2621
2622        elevation[i, :] = array([float(x) for x in fields])
2623
2624    fid.close()
2625
2626
2627
2628
2629
2630def ferret2sww(basename_in, basename_out = None,
2631               verbose = False,
2632               minlat = None, maxlat = None,
2633               minlon = None, maxlon = None,
2634               mint = None, maxt = None, mean_stage = 0,
2635               origin = None, zscale = 1,
2636               fail_on_NaN = True,
2637               NaN_filler = 0,
2638               elevation = None,
2639               inverted_bathymetry = True
2640               ): #FIXME: Bathymetry should be obtained
2641                                  #from MOST somehow.
2642                                  #Alternatively from elsewhere
2643                                  #or, as a last resort,
2644                                  #specified here.
2645                                  #The value of -100 will work
2646                                  #for the Wollongong tsunami
2647                                  #scenario but is very hacky
2648    """Convert MOST and 'Ferret' NetCDF format for wave propagation to
2649    sww format native to abstract_2d_finite_volumes.
2650
    Specify only basename_in and read files of the form
    basename_in + '_ha.nc', '_ua.nc', '_va.nc' and '_e.nc' containing
    relative height, x-velocity, y-velocity and elevation, respectively.
2654
2655    Also convert latitude and longitude to UTM. All coordinates are
2656    assumed to be given in the GDA94 datum.
2657
    min's and max's: If omitted - full extent is used.
    To include a value min may equal it, while max must exceed it.
    Lat and lon are assumed to be in decimal degrees
2661
2662    origin is a 3-tuple with geo referenced
2663    UTM coordinates (zone, easting, northing)
2664
2665    nc format has values organised as HA[TIME, LATITUDE, LONGITUDE]
2666    which means that longitude is the fastest
2667    varying dimension (row major order, so to speak)
2668
2669    ferret2sww uses grid points as vertices in a triangular grid
2670    counting vertices from lower left corner upwards, then right
2671    """
2672
2673    import os
2674    from Scientific.IO.NetCDF import NetCDFFile
2675    from Numeric import Float, Int, Int32, searchsorted, zeros, array
2676    from Numeric import allclose, around
2677
2678    precision = Float
2679
2680    msg = 'Must use latitudes and longitudes for minlat, maxlon etc'
2681
    if minlat is not None:
        assert -90 < minlat < 90 , msg
    if maxlat is not None:
        assert -90 < maxlat < 90 , msg
        if minlat is not None:
            assert maxlat > minlat
    if minlon is not None:
        assert -180 < minlon < 180 , msg
    if maxlon is not None:
        assert -180 < maxlon < 180 , msg
        if minlon is not None:
            assert maxlon > minlon
2694       
2695
2696
2697    #Get NetCDF data
2698    if verbose: print 'Reading files %s_*.nc' %basename_in
2699    #print "basename_in + '_ha.nc'",basename_in + '_ha.nc'
2700    file_h = NetCDFFile(basename_in + '_ha.nc', 'r') #Wave amplitude (cm)
2701    file_u = NetCDFFile(basename_in + '_ua.nc', 'r') #Velocity (x) (cm/s)
2702    file_v = NetCDFFile(basename_in + '_va.nc', 'r') #Velocity (y) (cm/s)
2703    file_e = NetCDFFile(basename_in + '_e.nc', 'r')  #Elevation (z) (m)
2704
2705    if basename_out is None:
2706        swwname = basename_in + '.sww'
2707    else:
2708        swwname = basename_out + '.sww'
2709
2710    # Get dimensions of file_h
2711    for dimension in file_h.dimensions.keys():
2712        if dimension[:3] == 'LON':
2713            dim_h_longitude = dimension
2714        if dimension[:3] == 'LAT':
2715            dim_h_latitude = dimension
2716        if dimension[:4] == 'TIME':
2717            dim_h_time = dimension
2718
2719#    print 'long:', dim_h_longitude
2720#    print 'lats:', dim_h_latitude
2721#    print 'times:', dim_h_time
2722
2723    times = file_h.variables[dim_h_time]
2724    latitudes = file_h.variables[dim_h_latitude]
2725    longitudes = file_h.variables[dim_h_longitude]
2726
2731    # get dimensions for file_e
2732    for dimension in file_e.dimensions.keys():
2733        if dimension[:3] == 'LON':
2734            dim_e_longitude = dimension
2735        if dimension[:3] == 'LAT':
2736            dim_e_latitude = dimension
2737
2738    # get dimensions for file_u
2739    for dimension in file_u.dimensions.keys():
2740        if dimension[:3] == 'LON':
2741            dim_u_longitude = dimension
2742        if dimension[:3] == 'LAT':
2743            dim_u_latitude = dimension
2744        if dimension[:4] == 'TIME':
2745            dim_u_time = dimension
2746           
2747    # get dimensions for file_v
2748    for dimension in file_v.dimensions.keys():
2749        if dimension[:3] == 'LON':
2750            dim_v_longitude = dimension
2751        if dimension[:3] == 'LAT':
2752            dim_v_latitude = dimension
2753        if dimension[:4] == 'TIME':
2754            dim_v_time = dimension
2755
2756
2757    # Precision used by most for lat/lon is 4 or 5 decimals
2758    e_lat = around(file_e.variables[dim_e_latitude][:], 5)
2759    e_lon = around(file_e.variables[dim_e_longitude][:], 5)
2760
2761    # Check that files are compatible
2762    assert allclose(latitudes, file_u.variables[dim_u_latitude])
2763    assert allclose(latitudes, file_v.variables[dim_v_latitude])
2764    assert allclose(latitudes, e_lat)
2765
2766    assert allclose(longitudes, file_u.variables[dim_u_longitude])
2767    assert allclose(longitudes, file_v.variables[dim_v_longitude])
2768    assert allclose(longitudes, e_lon)
2769
2770    if mint is None:
2771        jmin = 0
2772        mint = times[0]       
2773    else:
2774        jmin = searchsorted(times, mint)
2775       
2776    if maxt is None:
2777        jmax = len(times)
2778        maxt = times[-1]
2779    else:
2780        jmax = searchsorted(times, maxt)
2781
2782    #print "latitudes[:]",latitudes[:]
2783    #print "longitudes[:]",longitudes [:]
2784    kmin, kmax, lmin, lmax = _get_min_max_indexes(latitudes[:],
2785                                                  longitudes[:],
2786                                                  minlat, maxlat,
2787                                                  minlon, maxlon)
2788
2789
2790    times = times[jmin:jmax]
2791    latitudes = latitudes[kmin:kmax]
2792    longitudes = longitudes[lmin:lmax]
2793
2794    #print "latitudes[:]",latitudes[:]
2795    #print "longitudes[:]",longitudes [:]
2796
2797    if verbose: print 'cropping'
2798    zname = 'ELEVATION'
2799
2800    amplitudes = file_h.variables['HA'][jmin:jmax, kmin:kmax, lmin:lmax]
2801    uspeed = file_u.variables['UA'][jmin:jmax, kmin:kmax, lmin:lmax] #Lon
2802    vspeed = file_v.variables['VA'][jmin:jmax, kmin:kmax, lmin:lmax] #Lat
2803    elevations = file_e.variables[zname][kmin:kmax, lmin:lmax]
2804
2805    #    if latitudes2[0]==latitudes[0] and latitudes2[-1]==latitudes[-1]:
2806    #        elevations = file_e.variables['ELEVATION'][kmin:kmax, lmin:lmax]
2807    #    elif latitudes2[0]==latitudes[-1] and latitudes2[-1]==latitudes[0]:
2808    #        from Numeric import asarray
2809    #        elevations=elevations.tolist()
2810    #        elevations.reverse()
2811    #        elevations=asarray(elevations)
2812    #    else:
2813    #        from Numeric import asarray
2814    #        elevations=elevations.tolist()
2815    #        elevations.reverse()
2816    #        elevations=asarray(elevations)
2817    #        'print hmmm'
2818
2819
2820
2821    #Get missing values
2822    nan_ha = file_h.variables['HA'].missing_value[0]
2823    nan_ua = file_u.variables['UA'].missing_value[0]
2824    nan_va = file_v.variables['VA'].missing_value[0]
2825    if hasattr(file_e.variables[zname],'missing_value'):
2826        nan_e  = file_e.variables[zname].missing_value[0]
2827    else:
2828        nan_e = None
2829
2830    #Cleanup
2831    from Numeric import sometrue
2832
2833    missing = (amplitudes == nan_ha)
2834    if sometrue (missing):
2835        if fail_on_NaN:
2836            msg = 'NetCDFFile %s contains missing values'\
2837                  %(basename_in+'_ha.nc')
2838            raise DataMissingValuesError, msg
2839        else:
2840            amplitudes = amplitudes*(missing==0) + missing*NaN_filler
2841
2842    missing = (uspeed == nan_ua)
2843    if sometrue (missing):
2844        if fail_on_NaN:
2845            msg = 'NetCDFFile %s contains missing values'\
2846                  %(basename_in+'_ua.nc')
2847            raise DataMissingValuesError, msg
2848        else:
2849            uspeed = uspeed*(missing==0) + missing*NaN_filler
2850
2851    missing = (vspeed == nan_va)
2852    if sometrue (missing):
2853        if fail_on_NaN:
2854            msg = 'NetCDFFile %s contains missing values'\
2855                  %(basename_in+'_va.nc')
2856            raise DataMissingValuesError, msg
2857        else:
2858            vspeed = vspeed*(missing==0) + missing*NaN_filler
2859
2860
2861    missing = (elevations == nan_e)
2862    if sometrue (missing):
2863        if fail_on_NaN:
2864            msg = 'NetCDFFile %s contains missing values'\
2865                  %(basename_in+'_e.nc')
2866            raise DataMissingValuesError, msg
2867        else:
2868            elevations = elevations*(missing==0) + missing*NaN_filler
2869
2870    #######
2871
2872
2873
2874    number_of_times = times.shape[0]
2875    number_of_latitudes = latitudes.shape[0]
2876    number_of_longitudes = longitudes.shape[0]
2877
2878    assert amplitudes.shape[0] == number_of_times
2879    assert amplitudes.shape[1] == number_of_latitudes
2880    assert amplitudes.shape[2] == number_of_longitudes
2881
2882    if verbose:
2883        print '------------------------------------------------'
2884        print 'Statistics:'
2885        print '  Extent (lat/lon):'
2886        print '    lat in [%f, %f], len(lat) == %d'\
2887              %(min(latitudes.flat), max(latitudes.flat),
2888                len(latitudes.flat))
2889        print '    lon in [%f, %f], len(lon) == %d'\
2890              %(min(longitudes.flat), max(longitudes.flat),
2891                len(longitudes.flat))
2892        print '    t in [%f, %f], len(t) == %d'\
2893              %(min(times.flat), max(times.flat), len(times.flat))
2894
        q = amplitudes.flat
        name = 'Amplitudes (ha) [cm]'
        print '    %s in [%f, %f]' %(name, min(q), max(q))

        q = uspeed.flat
        name = 'Speeds (ua) [cm/s]'
        print '    %s in [%f, %f]' %(name, min(q), max(q))

        q = vspeed.flat
        name = 'Speeds (va) [cm/s]'
        print '    %s in [%f, %f]' %(name, min(q), max(q))

        q = elevations.flat
        name = 'Elevations (e) [m]'
        print '    %s in [%f, %f]' %(name, min(q), max(q))
2910
2911
2912    # print number_of_latitudes, number_of_longitudes
2913    number_of_points = number_of_latitudes*number_of_longitudes
2914    number_of_volumes = (number_of_latitudes-1)*(number_of_longitudes-1)*2
2915
2916
2917    file_h.close()
2918    file_u.close()
2919    file_v.close()
2920    file_e.close()
2921
2922
2923    # NetCDF file definition
2924    outfile = NetCDFFile(swwname, 'w')
2925
2926    description = 'Converted from Ferret files: %s, %s, %s, %s'\
2927                  %(basename_in + '_ha.nc',
2928                    basename_in + '_ua.nc',
2929                    basename_in + '_va.nc',
2930                    basename_in + '_e.nc')
2931   
2932    # Create new file
2933    starttime = times[0]
2934   
2935    sww = Write_sww()
2936    sww.store_header(outfile, times, number_of_volumes,
2937                     number_of_points, description=description,
2938                     verbose=verbose,sww_precision=Float)
2939
2940    # Store
2941    from anuga.coordinate_transforms.redfearn import redfearn
2942    x = zeros(number_of_points, Float)  #Easting
2943    y = zeros(number_of_points, Float)  #Northing
2944
2945
2946    if verbose: print 'Making triangular grid'
2947
2948    # Check zone boundaries
2949    refzone, _, _ = redfearn(latitudes[0],longitudes[0])
2950
2951    vertices = {}
2952    i = 0
2953    for k, lat in enumerate(latitudes):       #Y direction
2954        for l, lon in enumerate(longitudes):  #X direction
2955
2956            vertices[l,k] = i
2957
2958            zone, easting, northing = redfearn(lat,lon)
2959
            msg = 'Zone boundary crossed at longitude = %s' %lon
2961            #assert zone == refzone, msg
2962            #print '%7.2f %7.2f %8.2f %8.2f' %(lon, lat, easting, northing)
2963            x[i] = easting
2964            y[i] = northing
2965            i += 1
2966
2967    #Construct 2 triangles per 'rectangular' element
2968    volumes = []
2969    for l in range(number_of_longitudes-1):    #X direction
2970        for k in range(number_of_latitudes-1): #Y direction
2971            v1 = vertices[l,k+1]
2972            v2 = vertices[l,k]
2973            v3 = vertices[l+1,k+1]
2974            v4 = vertices[l+1,k]
2975
2976            volumes.append([v1,v2,v3]) #Upper element
2977            volumes.append([v4,v3,v2]) #Lower element
2978
2979    volumes = array(volumes)
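
    # Example: a 3x3 grid of points (number_of_latitudes == 3,
    # number_of_longitudes == 3) has (3-1)*(3-1) = 4 rectangular cells
    # and hence yields 2*4 = 8 triangles.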
2980
2981    if origin is None:
2982        origin = Geo_reference(refzone,min(x),min(y))
2983    geo_ref = write_NetCDF_georeference(origin, outfile)
2984   
2985    if elevation is not None:
2986        z = elevation
2987    else:
2988        if inverted_bathymetry:
2989            z = -1*elevations
2990        else:
2991            z = elevations
2992    #FIXME: z should be obtained from MOST and passed in here
2993
2994    #FIXME use the Write_sww instance(sww) to write this info
2995    from Numeric import resize
2996    z = resize(z,outfile.variables['z'][:].shape)
2997    outfile.variables['x'][:] = x - geo_ref.get_xllcorner()
2998    outfile.variables['y'][:] = y - geo_ref.get_yllcorner()
    outfile.variables['z'][:] = z             #FIXME HACK for backwards compat.
3000    outfile.variables['elevation'][:] = z
3001    outfile.variables['volumes'][:] = volumes.astype(Int32) #For Opteron 64
3002
3003
3004
3005    #Time stepping
3006    stage = outfile.variables['stage']
3007    xmomentum = outfile.variables['xmomentum']
3008    ymomentum = outfile.variables['ymomentum']
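    # Units: MOST amplitudes and speeds are in cm and cm/s, hence the
    # division by 100 below to obtain metres and m/s.  Momenta are
    # speed [m/s] times depth h = w - z [m].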
3009
3010    if verbose: print 'Converting quantities'
3011    n = len(times)
3012    for j in range(n):
3013        if verbose and j%((n+10)/10)==0: print '  Doing %d of %d' %(j, n)
3014        i = 0
3015        for k in range(number_of_latitudes):      #Y direction
3016            for l in range(number_of_longitudes): #X direction
3017                w = zscale*amplitudes[j,k,l]/100 + mean_stage
3018                stage[j,i] = w
3019                h = w - z[i]
3020                xmomentum[j,i] = uspeed[j,k,l]/100*h
3021                ymomentum[j,i] = vspeed[j,k,l]/100*h
3022                i += 1
3023
3024    #outfile.close()
3025
3026    #FIXME: Refactor using code from file_function.statistics
3027    #Something like print swwstats(swwname)
3028    if verbose:
3029        x = outfile.variables['x'][:]
3030        y = outfile.variables['y'][:]
3031        print '------------------------------------------------'
3032        print 'Statistics of output file:'
3033        print '  Name: %s' %swwname
3034        print '  Reference:'
3035        print '    Lower left corner: [%f, %f]'\
3036              %(geo_ref.get_xllcorner(), geo_ref.get_yllcorner())
        print '    Start time: %f' %starttime
3038        print '    Min time: %f' %mint
3039        print '    Max time: %f' %maxt
3040        print '  Extent:'
3041        print '    x [m] in [%f, %f], len(x) == %d'\
3042              %(min(x.flat), max(x.flat), len(x.flat))
3043        print '    y [m] in [%f, %f], len(y) == %d'\
3044              %(min(y.flat), max(y.flat), len(y.flat))
3045        print '    t [s] in [%f, %f], len(t) == %d'\
3046              %(min(times), max(times), len(times))
3047        print '  Quantities [SI units]:'
3048        for name in ['stage', 'xmomentum', 'ymomentum', 'elevation']:
3049            q = outfile.variables[name][:].flat
3050            print '    %s in [%f, %f]' %(name, min(q), max(q))
3051
3052
3053
3054    outfile.close()
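
# Example usage (hedged sketch - 'most_output' is a hypothetical basename
# for most_output_ha.nc, _ua.nc, _va.nc and _e.nc):
def _example_ferret2sww():
    """Convert a MOST/Ferret dataset to sww, cropped to a lat/lon window."""
    ferret2sww('most_output',
               basename_out='most_boundary',
               minlat=-35.0, maxlat=-34.0,
               minlon=150.0, maxlon=151.5,
               mean_stage=0.0,
               zscale=1,
               verbose=True)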
3055
3056
3057
3058
3059
3060def timefile2netcdf(filename, quantity_names=None, time_as_seconds=False):
3061    """Template for converting typical text files with time series to
3062    NetCDF tms file.
3063
3064
3065    The file format is assumed to be either two fields separated by a comma:
3066
3067        time [DD/MM/YY hh:mm:ss], value0 value1 value2 ...
3068
3069    E.g
3070
3071      31/08/04 00:00:00, 1.328223 0 0
3072      31/08/04 00:15:00, 1.292912 0 0
3073
3074    or time (seconds), value0 value1 value2 ...
3075   
3076      0.0, 1.328223 0 0
3077      0.1, 1.292912 0 0
3078
3079    will provide a time dependent function f(t) with three attributes
3080
    filename is assumed to be the rootname with extensions .txt and .tms
3082    """
3083
3084    import time, calendar
3085    from Numeric import array
3086    from anuga.config import time_format
3087    from anuga.utilities.numerical_tools import ensure_numeric
3088
3089
3090
3091    fid = open(filename + '.txt')
3092    line = fid.readline()
3093    fid.close()
3094
3095    fields = line.split(',')
    msg = 'File %s must have the format: date, value0 value1 value2 ...'\
          %(filename + '.txt')
    assert len(fields) == 2, msg
3098
3099    if not time_as_seconds:
3100        try:
3101            starttime = calendar.timegm(time.strptime(fields[0], time_format))
3102        except ValueError:
3103            msg = 'First field in file %s must be' %filename
3104            msg += ' date-time with format %s.\n' %time_format
3105            msg += 'I got %s instead.' %fields[0]
3106            raise DataTimeError, msg
3107    else:
3108        try:
3109            starttime = float(fields[0])
        except ValueError:
            msg = 'Bad time format: %s' %fields[0]
            raise DataTimeError, msg
3113
3114
3115    #Split values
3116    values = []
3117    for value in fields[1].split():
3118        values.append(float(value))
3119
3120    q = ensure_numeric(values)
3121
3122    msg = 'ERROR: File must contain at least one independent value'
3123    assert len(q.shape) == 1, msg
3124
3125
3126
3127    #Read times proper
3128    from Numeric import zeros, Float, alltrue
3129    from anuga.config import time_format
3130    import time, calendar
3131
3132    fid = open(filename + '.txt')
3133    lines = fid.readlines()
3134    fid.close()
3135
3136    N = len(lines)
3137    d = len(q)
3138
3139    T = zeros(N, Float)       #Time
3140    Q = zeros((N, d), Float)  #Values
3141
3142    for i, line in enumerate(lines):
3143        fields = line.split(',')
3144        if not time_as_seconds:
3145            realtime = calendar.timegm(time.strptime(fields[0], time_format))
3146        else:
            realtime = float(fields[0])
3148        T[i] = realtime - starttime
3149
3150        for j, value in enumerate(fields[1].split()):
3151            Q[i, j] = float(value)
3152
    msg = 'File %s must list time as a monotonically ' %filename
    msg += 'increasing sequence'
3155    assert alltrue( T[1:] - T[:-1] > 0 ), msg
3156
3157    #Create NetCDF file
3158    from Scientific.IO.NetCDF import NetCDFFile
3159
3160    fid = NetCDFFile(filename + '.tms', 'w')
3161
3162
3163    fid.institution = 'Geoscience Australia'
3164    fid.description = 'Time series'
3165
3166
3167    #Reference point
3168    #Start time in seconds since the epoch (midnight 1/1/1970)
3169    #FIXME: Use Georef
3170    fid.starttime = starttime
3171
3172    # dimension definitions
3173    #fid.createDimension('number_of_volumes', self.number_of_volumes)
3174    #fid.createDimension('number_of_vertices', 3)
3175
3176
3177    fid.createDimension('number_of_timesteps', len(T))
3178
3179    fid.createVariable('time', Float, ('number_of_timesteps',))
3180
3181    fid.variables['time'][:] = T
3182
3183    for i in range(Q.shape[1]):
        try:
            name = quantity_names[i]
        except (TypeError, IndexError):
            name = 'Attribute%d' %i
3188
3189        fid.createVariable(name, Float, ('number_of_timesteps',))
3190        fid.variables[name][:] = Q[:,i]
3191
3192    fid.close()
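
# Example usage (hedged sketch - 'gauge' is a hypothetical rootname for a
# file gauge.txt in the format described above):
def _example_timefile2netcdf():
    """Convert a comma-separated time series text file to gauge.tms."""
    timefile2netcdf('gauge',
                    quantity_names=['stage', 'xmomentum', 'ymomentum'],
                    time_as_seconds=False)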
3193
3194
3195def extent_sww(file_name):
    """
    Read in an sww file.

    Input:
    file_name - the sww file

    Output:
    A list [min(x), max(x), min(y), max(y), min(stage), max(stage)]
    giving the spatial extent and the stage range.
    """
3209
3210
3211    from Scientific.IO.NetCDF import NetCDFFile
3212
3213    #Check contents
3214    #Get NetCDF
3215    fid = NetCDFFile(file_name, 'r')
3216
3217    # Get the variables
3218    x = fid.variables['x'][:]
3219    y = fid.variables['y'][:]
3220    stage = fid.variables['stage'][:]
3221    #print "stage",stage
    #print "stage.shape",stage.shape
    #print "min(stage.flat), max(stage.flat)",min(stage.flat), max(stage.flat)
3224    #print "min(stage)",min(stage)
3225
3226    fid.close()
3227
3228    return [min(x),max(x),min(y),max(y),min(stage.flat),max(stage.flat)]
3229
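# Example (illustrative; 'model.sww' is a hypothetical file):
#
#     xmin, xmax, ymin, ymax, stage_min, stage_max = extent_sww('model.sww')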
3230
3231def sww2domain(filename, boundary=None, t=None,
3232               fail_if_NaN=True ,NaN_filler=0,
3233               verbose = False, very_verbose = False):
    """
    Usage: domain = sww2domain('file.sww', t=time (default = last time in file))

    Boundary is not recommended if domain.smooth is not selected, as it
    uses unique coordinates, but not unique boundaries. This means that
    the boundary file will not be compatible with the coordinates, and will
    give a different final boundary, or crash.
    """
    NaN = 9.969209968386869e+036
    # Initialise NaN to the default NetCDF fill value for floats;
    # it is overridden below if a variable defines its own missing_value.
3244
3245    from Scientific.IO.NetCDF import NetCDFFile
3246    from shallow_water import Domain
3247    from Numeric import asarray, transpose, resize
3248
3249    if verbose: print 'Reading from ', filename
3250    fid = NetCDFFile(filename, 'r')    #Open existing file for read
3251    time = fid.variables['time']       #Timesteps
3252    if t is None:
3253        t = time[-1]
3254    time_interp = get_time_interp(time,t)
3255
3256    # Get the variables as Numeric arrays
3257    x = fid.variables['x'][:]             #x-coordinates of vertices
3258    y = fid.variables['y'][:]             #y-coordinates of vertices
3259    elevation = fid.variables['elevation']     #Elevation
3260    stage = fid.variables['stage']     #Water level
3261    xmomentum = fid.variables['xmomentum']   #Momentum in the x-direction
3262    ymomentum = fid.variables['ymomentum']   #Momentum in the y-direction
3263
3264    starttime = fid.starttime[0]
3265    volumes = fid.variables['volumes'][:] #Connectivity
3266    coordinates = transpose(asarray([x.tolist(),y.tolist()]))
3267    #FIXME (Ole): Something like this might be better: concatenate( (x, y), axis=1 )
3268    # or concatenate( (x[:,NewAxis],x[:,NewAxis]), axis=1 )
3269
3270    conserved_quantities = []
3271    interpolated_quantities = {}
3272    other_quantities = []
3273
3274    # get geo_reference
3275    #sww files don't have to have a geo_ref
3276    try:
3277        geo_reference = Geo_reference(NetCDFObject=fid)
3278    except: #AttributeError, e:
3279        geo_reference = None
3280
3281    if verbose: print '    getting quantities'
3282    for quantity in fid.variables.keys():
3283        dimensions = fid.variables[quantity].dimensions
3284        if 'number_of_timesteps' in dimensions:
3285            conserved_quantities.append(quantity)
3286            interpolated_quantities[quantity]=\
3287                  interpolated_quantity(fid.variables[quantity][:],time_interp)
3288        else: other_quantities.append(quantity)
3289
3290    other_quantities.remove('x')
3291    other_quantities.remove('y')
3292    other_quantities.remove('z')
3293    other_quantities.remove('volumes')
3294    try:
3295        other_quantities.remove('stage_range')
3296        other_quantities.remove('xmomentum_range')
3297        other_quantities.remove('ymomentum_range')
3298        other_quantities.remove('elevation_range')
    except ValueError:
        pass # Not all sww files store the *_range variables
3301       
3302
3303    conserved_quantities.remove('time')
3304
3305    if verbose: print '    building domain'
3306    #    From domain.Domain:
3307    #    domain = Domain(coordinates, volumes,\
3308    #                    conserved_quantities = conserved_quantities,\
3309    #                    other_quantities = other_quantities,zone=zone,\
3310    #                    xllcorner=xllcorner, yllcorner=yllcorner)
3311
3312    #   From shallow_water.Domain:
3313    coordinates=coordinates.tolist()
3314    volumes=volumes.tolist()
3315    #FIXME:should this be in mesh?(peter row)
3316    if fid.smoothing == 'Yes': unique = False
3317    else: unique = True
3318    if unique:
3319        coordinates,volumes,boundary=weed(coordinates,volumes,boundary)
3320
3321
3322    try:
3323        domain = Domain(coordinates, volumes, boundary)
3324    except AssertionError, e:
3325        fid.close()
3326        msg = 'Domain could not be created: %s. Perhaps use "fail_if_NaN=False and NaN_filler = ..."' %e
3327        raise DataDomainError, msg
3328
    if boundary is not None:
3330        domain.boundary = boundary
3331
3332    domain.geo_reference = geo_reference
3333
3334    domain.starttime=float(starttime)+float(t)
3335    domain.time=0.0
3336
3337    for quantity in other_quantities:
3338        try:
3339            NaN = fid.variables[quantity].missing_value
        except AttributeError:
            pass #quantity has no missing_value number
3342        X = fid.variables[quantity][:]
3343        if very_verbose:
3344            print '       ',quantity
3345            print '        NaN =',NaN
3346            print '        max(X)'
3347            print '       ',max(X)
3348            print '        max(X)==NaN'
3349            print '       ',max(X)==NaN
3350            print ''
3351        if (max(X)==NaN) or (min(X)==NaN):
3352            if fail_if_NaN:
3353                msg = 'quantity "%s" contains no_data entry'%quantity
3354                raise DataMissingValuesError, msg
3355            else:
                data = (X != NaN)
3357                X = (X*data)+(data==0)*NaN_filler
3358        if unique:
3359            X = resize(X,(len(X)/3,3))
3360        domain.set_quantity(quantity,X)
3361    #
3362    for quantity in conserved_quantities:
3363        try:
3364            NaN = fid.variables[quantity].missing_value
        except AttributeError:
            pass #quantity has no missing_value number
3367        X = interpolated_quantities[quantity]
3368        if very_verbose:
3369            print '       ',quantity
3370            print '        NaN =',NaN
3371            print '        max(X)'
3372            print '       ',max(X)
3373            print '        max(X)==NaN'
3374            print '       ',max(X)==NaN
3375            print ''
3376        if (max(X)==NaN) or (min(X)==NaN):
3377            if fail_if_NaN:
3378                msg = 'quantity "%s" contains no_data entry'%quantity
3379                raise DataMissingValuesError, msg
3380            else:
                data = (X != NaN)
3382                X = (X*data)+(data==0)*NaN_filler
3383        if unique:
3384            X = resize(X,(X.shape[0]/3,3))
3385        domain.set_quantity(quantity,X)
3386
3387    fid.close()
3388    return domain
3389
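# Example (illustrative sketch; 'model.sww' is a hypothetical file and the
# chosen time must lie within the file's time range):
#
#     domain = sww2domain('model.sww', verbose=True)   # last timestep in file
#     domain = sww2domain('model.sww', t=3600.0)       # state at time 3600 s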
3390
3391def interpolated_quantity(saved_quantity,time_interp):
3392
3393    #given an index and ratio, interpolate quantity with respect to time.
3394    index,ratio = time_interp
3395    Q = saved_quantity
3396    if ratio > 0:
3397        q = (1-ratio)*Q[index]+ ratio*Q[index+1]
3398    else:
3399        q = Q[index]
3400    #Return vector of interpolated values
3401    return q
3402
3403
3404def get_time_interp(time,t=None):
3405    #Finds the ratio and index for time interpolation.
3406    #It is borrowed from previous abstract_2d_finite_volumes code.
3407    if t is None:
3408        t=time[-1]
3409        index = -1
3410        ratio = 0.
3411    else:
3412        T = time
3413        tau = t
3414        index=0
3415        msg = 'Time interval derived from file %s [%s:%s]'\
3416            %('FIXMEfilename', T[0], T[-1])
3417        msg += ' does not match model time: %s' %tau
3418        if tau < time[0]: raise DataTimeError, msg
3419        if tau > time[-1]: raise DataTimeError, msg
3420        while tau > time[index]: index += 1
3421        while tau < time[index]: index -= 1
3422        if tau == time[index]:
3423            #Protect against case where tau == time[-1] (last time)
3424            # - also works in general when tau == time[i]
3425            ratio = 0
3426        else:
3427            #t is now between index and index+1
3428            ratio = (tau - time[index])/(time[index+1] - time[index])
3429    return (index,ratio)
3430
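# Worked example (illustrative): with time = [0., 10., 20.] and t = 15.,
# get_time_interp returns (index=1, ratio=0.5), so interpolated_quantity
# computes 0.5*Q[1] + 0.5*Q[2].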
3431
3432def weed(coordinates,volumes,boundary = None):
3433    if type(coordinates)==ArrayType:
3434        coordinates = coordinates.tolist()
3435    if type(volumes)==ArrayType:
3436        volumes = volumes.tolist()
3437
3438    unique = False
3439    point_dict = {}
3440    same_point = {}
3441    for i in range(len(coordinates)):
3442        point = tuple(coordinates[i])
3443        if point_dict.has_key(point):
3444            unique = True
3445            same_point[i]=point
3446            #to change all point i references to point j
3447        else:
3448            point_dict[point]=i
3449            same_point[i]=point
3450
3451    coordinates = []
3452    i = 0
3453    for point in point_dict.keys():
3454        point = tuple(point)
3455        coordinates.append(list(point))
3456        point_dict[point]=i
3457        i+=1
3458
3459
3460    for volume in volumes:
3461        for i in range(len(volume)):
3462            index = volume[i]
3463            if index>-1:
3464                volume[i]=point_dict[same_point[index]]
3465
3466    new_boundary = {}
    if boundary is not None:
3468        for segment in boundary.keys():
3469            point0 = point_dict[same_point[segment[0]]]
3470            point1 = point_dict[same_point[segment[1]]]
3471            label = boundary[segment]
            #FIXME should the boundary attributes be concatenated
            #('exterior, pond') or replaced ('pond')? (peter row)
3474
3475            if new_boundary.has_key((point0,point1)):
3476                new_boundary[(point0,point1)]=new_boundary[(point0,point1)]#\
3477                                              #+','+label
3478
3479            elif new_boundary.has_key((point1,point0)):
3480                new_boundary[(point1,point0)]=new_boundary[(point1,point0)]#\
3481                                              #+','+label
3482            else: new_boundary[(point0,point1)]=label
3483
3484        boundary = new_boundary
3485
3486    return coordinates,volumes,boundary
3487
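# Worked example (illustrative): weed() merges duplicated vertices.
#
#     coords = [[0.,0.], [1.,0.], [0.,1.], [1.,0.]]   # vertex 3 repeats vertex 1
#     vols = [[0, 1, 2], [2, 3, 0]]
#     coords, vols, _ = weed(coords, vols)
#     # -> 3 unique coordinates; both volumes now reference the same
#     #    index wherever [1.,0.] appeared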
3488
3489def decimate_dem(basename_in, stencil, cellsize_new, basename_out=None,
3490                 verbose=False):
    """Read Digital Elevation model from the following NetCDF format (.dem)
3492
3493    Example:
3494
3495    ncols         3121
3496    nrows         1800
3497    xllcorner     722000
3498    yllcorner     5893000
3499    cellsize      25
3500    NODATA_value  -9999
3501    138.3698 137.4194 136.5062 135.5558 ..........
3502
3503    Decimate data to cellsize_new using stencil and write to NetCDF dem format.
3504    """
3505
3506    import os
3507    from Scientific.IO.NetCDF import NetCDFFile
3508    from Numeric import Float, zeros, sum, reshape, equal
3509
3510    root = basename_in
3511    inname = root + '.dem'
3512
3513    #Open existing netcdf file to read
3514    infile = NetCDFFile(inname, 'r')
3515    if verbose: print 'Reading DEM from %s' %inname
3516
3517    #Read metadata
3518    ncols = infile.ncols[0]
3519    nrows = infile.nrows[0]
3520    xllcorner = infile.xllcorner[0]
3521    yllcorner = infile.yllcorner[0]
3522    cellsize = infile.cellsize[0]
3523    NODATA_value = infile.NODATA_value[0]
3524    zone = infile.zone[0]
3525    false_easting = infile.false_easting[0]
3526    false_northing = infile.false_northing[0]
3527    projection = infile.projection
3528    datum = infile.datum
3529    units = infile.units
3530
3531    dem_elevation = infile.variables['elevation']
3532
3533    #Get output file name
    if basename_out is None:
3535        outname = root + '_' + repr(cellsize_new) + '.dem'
3536    else:
3537        outname = basename_out + '.dem'
3538
3539    if verbose: print 'Write decimated NetCDF file to %s' %outname
3540
3541    #Determine some dimensions for decimated grid
3542    (nrows_stencil, ncols_stencil) = stencil.shape
3543    x_offset = ncols_stencil / 2
3544    y_offset = nrows_stencil / 2
3545    cellsize_ratio = int(cellsize_new / cellsize)
3546    ncols_new = 1 + (ncols - ncols_stencil) / cellsize_ratio
3547    nrows_new = 1 + (nrows - nrows_stencil) / cellsize_ratio
3548
3549    #Open netcdf file for output
3550    outfile = NetCDFFile(outname, 'w')
3551
3552    #Create new file
3553    outfile.institution = 'Geoscience Australia'
3554    outfile.description = 'NetCDF DEM format for compact and portable storage ' +\
3555                           'of spatial point data'
3556    #Georeferencing
3557    outfile.zone = zone
3558    outfile.projection = projection
3559    outfile.datum = datum
3560    outfile.units = units
3561
3562    outfile.cellsize = cellsize_new
3563    outfile.NODATA_value = NODATA_value
3564    outfile.false_easting = false_easting
3565    outfile.false_northing = false_northing
3566
3567    outfile.xllcorner = xllcorner + (x_offset * cellsize)
3568    outfile.yllcorner = yllcorner + (y_offset * cellsize)
3569    outfile.ncols = ncols_new
3570    outfile.nrows = nrows_new
3571
3572    # dimension definition
3573    outfile.createDimension('number_of_points', nrows_new*ncols_new)
3574
3575    # variable definition
3576    outfile.createVariable('elevation', Float, ('number_of_points',))
3577
3578    # Get handle to the variable
3579    elevation = outfile.variables['elevation']
3580
3581    dem_elevation_r = reshape(dem_elevation, (nrows, ncols))
3582
3583    #Store data
3584    global_index = 0
3585    for i in range(nrows_new):
3586        if verbose: print 'Processing row %d of %d' %(i, nrows_new)
3587        lower_index = global_index
3588        telev =  zeros(ncols_new, Float)
3589        local_index = 0
3590        trow = i * cellsize_ratio
3591
3592        for j in range(ncols_new):
3593            tcol = j * cellsize_ratio
3594            tmp = dem_elevation_r[trow:trow+nrows_stencil, tcol:tcol+ncols_stencil]
3595
3596            #if dem contains 1 or more NODATA_values set value in
3597            #decimated dem to NODATA_value, else compute decimated
3598            #value using stencil
3599            if sum(sum(equal(tmp, NODATA_value))) > 0:
3600                telev[local_index] = NODATA_value
3601            else:
3602                telev[local_index] = sum(sum(tmp * stencil))
3603
3604            global_index += 1
3605            local_index += 1
3606
3607        upper_index = global_index
3608
3609        elevation[lower_index:upper_index] = telev
3610
3611    assert global_index == nrows_new*ncols_new, 'index not equal to number of points'
3612
3613    infile.close()
3614    outfile.close()
3615
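# Example (illustrative sketch; 'topo.dem' with 25 m cells is a hypothetical
# input file):
#
#     from Numeric import ones, Float
#     stencil = ones((4, 4), Float) / 16.0    # 4x4 block average
#     decimate_dem('topo', stencil, cellsize_new=100)
#     # -> writes topo_100.dem, averaging each 4x4 block of cells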
3616
3617
3618
3619def tsh2sww(filename, verbose=False): 
    """
    Convert a tsh/msh file to an sww file; useful to check that a
    tsh/msh file 'looks' good.
    """
3623
3624
    if verbose: print 'Creating domain from', filename
    domain = pmesh_to_domain_instance(filename, Domain)
    if verbose: print "Number of triangles = ", len(domain)
3628
3629    domain.smooth = True
3630    domain.format = 'sww'   #Native netcdf visualisation format
3631    file_path, filename = path.split(filename)
3632    filename, ext = path.splitext(filename)
3633    domain.set_name(filename)   
3634    domain.reduction = mean
    if verbose: print "file_path", file_path
    if file_path == "": file_path = "."
3637    domain.set_datadir(file_path)
3638
    if verbose:
3640        print "Output written to " + domain.get_datadir() + sep + \
3641              domain.get_name() + "." + domain.format
3642    sww = get_dataobject(domain)
3643    sww.store_connectivity()
3644    sww.store_timestep()
3645
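# Example (illustrative; 'mesh.tsh' is a hypothetical file):
#
#     tsh2sww('mesh.tsh', verbose=True)    # writes mesh.sww next to the input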
3646
3647def asc_csiro2sww(bath_dir,
3648                  elevation_dir,
3649                  ucur_dir,
3650                  vcur_dir,
3651                  sww_file,
3652                  minlat = None, maxlat = None,
3653                  minlon = None, maxlon = None,
3654                  zscale=1,
3655                  mean_stage = 0,
3656                  fail_on_NaN = True,
3657                  elevation_NaN_filler = 0,
3658                  bath_prefix='ba',
3659                  elevation_prefix='el',
3660                  verbose=False):
    """
    Produce an sww boundary file from ESRI ASCII data from CSIRO.

    Also convert latitude and longitude to UTM. All coordinates are
    assumed to be given in the GDA94 datum.

    Assume:
    All files are in ESRI ASCII format.

    4 types of information:
    bathymetry
    elevation
    u velocity
    v velocity

    Assumptions:
    The metadata of all the files is the same.
    Each type is in a separate directory.
    One bath file with extension .000.
    The time period is less than 24hrs and uniform.
    """
3682    from Scientific.IO.NetCDF import NetCDFFile
3683
3684    from anuga.coordinate_transforms.redfearn import redfearn
3685
    precision = Float # So if we want to change the precision, it's done here
3687
3688    # go in to the bath dir and load the only file,
3689    bath_files = os.listdir(bath_dir)
3690
3691    bath_file = bath_files[0]
3692    bath_dir_file =  bath_dir + os.sep + bath_file
3693    bath_metadata,bath_grid =  _read_asc(bath_dir_file)
3694
3695    #Use the date.time of the bath file as a basis for
3696    #the start time for other files
3697    base_start = bath_file[-12:]
3698
3699    #go into the elevation dir and load the 000 file
3700    elevation_dir_file = elevation_dir  + os.sep + elevation_prefix \
3701                         + base_start
3702
3703    elevation_files = os.listdir(elevation_dir)
3704    ucur_files = os.listdir(ucur_dir)
3705    vcur_files = os.listdir(vcur_dir)
3706    elevation_files.sort()
    # the first elevation file should be the
    # file with the same base name as the bath data
    assert elevation_files[0] == elevation_prefix + base_start
3710
3711    number_of_latitudes = bath_grid.shape[0]
3712    number_of_longitudes = bath_grid.shape[1]
3713    number_of_volumes = (number_of_latitudes-1)*(number_of_longitudes-1)*2
3714
3715    longitudes = [bath_metadata['xllcorner']+x*bath_metadata['cellsize'] \
3716                  for x in range(number_of_longitudes)]
3717    latitudes = [bath_metadata['yllcorner']+y*bath_metadata['cellsize'] \
3718                 for y in range(number_of_latitudes)]
3719
    # reverse order of lat, so the first lat represents the first grid row
    latitudes.reverse()
3722
3723    kmin, kmax, lmin, lmax = _get_min_max_indexes(latitudes[:],longitudes[:],
3724                                                 minlat=minlat, maxlat=maxlat,
3725                                                 minlon=minlon, maxlon=maxlon)
3726
3727
3728    bath_grid = bath_grid[kmin:kmax,lmin:lmax]
3729    latitudes = latitudes[kmin:kmax]
3730    longitudes = longitudes[lmin:lmax]
3731    number_of_latitudes = len(latitudes)
3732    number_of_longitudes = len(longitudes)
3733    number_of_times = len(os.listdir(elevation_dir))
3734    number_of_points = number_of_latitudes*number_of_longitudes
3735    number_of_volumes = (number_of_latitudes-1)*(number_of_longitudes-1)*2
3736
3737    #Work out the times
3738    if len(elevation_files) > 1:
3739        # Assume: The time period is less than 24hrs.
3740        time_period = (int(elevation_files[1][-3:]) - \
3741                      int(elevation_files[0][-3:]))*60*60
3742        times = [x*time_period for x in range(len(elevation_files))]
3743    else:
3744        times = [0.0]
3745
3746
3747    if verbose:
3748        print '------------------------------------------------'
3749        print 'Statistics:'
3750        print '  Extent (lat/lon):'
3751        print '    lat in [%f, %f], len(lat) == %d'\
3752              %(min(latitudes), max(latitudes),
3753                len(latitudes))
3754        print '    lon in [%f, %f], len(lon) == %d'\
3755              %(min(longitudes), max(longitudes),
3756                len(longitudes))
3757        print '    t in [%f, %f], len(t) == %d'\
3758              %(min(times), max(times), len(times))
3759
3760    ######### WRITE THE SWW FILE #############
3761    # NetCDF file definition
3762    outfile = NetCDFFile(sww_file, 'w')
3763
3764    #Create new file
3765    outfile.institution = 'Geoscience Australia'
3766    outfile.description = 'Converted from XXX'
3767
3768
3769    #For sww compatibility
3770    outfile.smoothing = 'Yes'
3771    outfile.order = 1
3772
3773    #Start time in seconds since the epoch (midnight 1/1/1970)
3774    outfile.starttime = starttime = times[0]
3775
3776
3777    # dimension definitions
3778    outfile.createDimension('number_of_volumes', number_of_volumes)
3779
3780    outfile.createDimension('number_of_vertices', 3)
3781    outfile.createDimension('number_of_points', number_of_points)
3782    outfile.createDimension('number_of_timesteps', number_of_times)
3783
3784    # variable definitions
3785    outfile.createVariable('x', precision, ('number_of_points',))
3786    outfile.createVariable('y', precision, ('number_of_points',))
3787    outfile.createVariable('elevation', precision, ('number_of_points',))
3788
3789    #FIXME: Backwards compatibility
3790    outfile.createVariable('z', precision, ('number_of_points',))
3791    #################################
3792
3793    outfile.createVariable('volumes', Int, ('number_of_volumes',
3794                                            'number_of_vertices'))
3795
3796    outfile.createVariable('time', precision,
3797                           ('number_of_timesteps',))
3798
3799    outfile.createVariable('stage', precision,
3800                           ('number_of_timesteps',
3801                            'number_of_points'))
3802
3803    outfile.createVariable('xmomentum', precision,
3804                           ('number_of_timesteps',
3805                            'number_of_points'))
3806
3807    outfile.createVariable('ymomentum', precision,
3808                           ('number_of_timesteps',
3809                            'number_of_points'))
3810
3811    #Store
3812    from anuga.coordinate_transforms.redfearn import redfearn
3813    x = zeros(number_of_points, Float)  #Easting
3814    y = zeros(number_of_points, Float)  #Northing
3815
3816    if verbose: print 'Making triangular grid'
3817    #Get zone of 1st point.
3818    refzone, _, _ = redfearn(latitudes[0],longitudes[0])
3819
3820    vertices = {}
3821    i = 0
3822    for k, lat in enumerate(latitudes):
3823        for l, lon in enumerate(longitudes):
3824
3825            vertices[l,k] = i
3826
3827            zone, easting, northing = redfearn(lat,lon)
3828
            msg = 'Zone boundary crossed at longitude = %s' %lon
3830            #assert zone == refzone, msg
3831            #print '%7.2f %7.2f %8.2f %8.2f' %(lon, lat, easting, northing)
3832            x[i] = easting
3833            y[i] = northing
3834            i += 1
3835
3836
3837    #Construct 2 triangles per 'rectangular' element
3838    volumes = []
3839    for l in range(number_of_longitudes-1):    #X direction
3840        for k in range(number_of_latitudes-1): #Y direction
3841            v1 = vertices[l,k+1]
3842            v2 = vertices[l,k]
3843            v3 = vertices[l+1,k+1]
3844            v4 = vertices[l+1,k]
3845
            #Note, this is different to the ferret2sww code
            #since the order of the lats is reversed.
3848            volumes.append([v1,v3,v2]) #Upper element
3849            volumes.append([v4,v2,v3]) #Lower element
3850
3851    volumes = array(volumes)
3852
3853    geo_ref = Geo_reference(refzone,min(x),min(y))
3854    geo_ref.write_NetCDF(outfile)
3855
3856    # This will put the geo ref in the middle
3857    #geo_ref = Geo_reference(refzone,(max(x)+min(x))/2.0,(max(x)+min(y))/2.)
3858
3859
    if verbose:
        print '------------------------------------------------'
        print 'More Statistics:'
        print '  Extent (x/y):'
        print '    x in [%f, %f], len(x) == %d'\
              %(min(x), max(x),
                len(x))
        print '    y in [%f, %f], len(y) == %d'\
              %(min(y), max(y),
                len(y))
        print 'geo_ref: ',geo_ref
3871
3872    z = resize(bath_grid,outfile.variables['z'][:].shape)
3873    outfile.variables['x'][:] = x - geo_ref.get_xllcorner()
3874    outfile.variables['y'][:] = y - geo_ref.get_yllcorner()
3875    outfile.variables['z'][:] = z
3876    outfile.variables['elevation'][:] = z  #FIXME HACK
3877    outfile.variables['volumes'][:] = volumes.astype(Int32) #On Opteron 64
3878
3879    stage = outfile.variables['stage']
3880    xmomentum = outfile.variables['xmomentum']
3881    ymomentum = outfile.variables['ymomentum']
3882
3883    outfile.variables['time'][:] = times   #Store time relative
3884
3885    if verbose: print 'Converting quantities'
3886    n = number_of_times
3887    for j in range(number_of_times):
3888        # load in files
3889        elevation_meta, elevation_grid = \
3890           _read_asc(elevation_dir + os.sep + elevation_files[j])
3891
3892        _, u_momentum_grid =  _read_asc(ucur_dir + os.sep + ucur_files[j])
3893        _, v_momentum_grid =  _read_asc(vcur_dir + os.sep + vcur_files[j])
3894
3895        #cut matrix to desired size
3896        elevation_grid = elevation_grid[kmin:kmax,lmin:lmax]
3897        u_momentum_grid = u_momentum_grid[kmin:kmax,lmin:lmax]
3898        v_momentum_grid = v_momentum_grid[kmin:kmax,lmin:lmax]
3899       
3900        # handle missing values
3901        missing = (elevation_grid == elevation_meta['NODATA_value'])
        if sometrue(missing):
3903            if fail_on_NaN:
3904                msg = 'File %s contains missing values'\
3905                      %(elevation_files[j])
3906                raise DataMissingValuesError, msg
3907            else:
3908                elevation_grid = elevation_grid*(missing==0) + \
3909                                 missing*elevation_NaN_filler
3910
3911
3912        if verbose and j%((n+10)/10)==0: print '  Doing %d of %d' %(j, n)
3913        i = 0
3914        for k in range(number_of_latitudes):      #Y direction
3915            for l in range(number_of_longitudes): #X direction
3916                w = zscale*elevation_grid[k,l] + mean_stage
3917                stage[j,i] = w
3918                h = w - z[i]
3919                xmomentum[j,i] = u_momentum_grid[k,l]*h
3920                ymomentum[j,i] = v_momentum_grid[k,l]*h
3921                i += 1
3922    outfile.close()
3923
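# Example (illustrative sketch; all directory and file names are hypothetical):
#
#     asc_csiro2sww('bath_dir', 'elev_dir', 'ucur_dir', 'vcur_dir',
#                   'boundary.sww',
#                   minlat=-21.0, maxlat=-20.0,
#                   minlon=114.0, maxlon=115.0,
#                   verbose=True)
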
3924def _get_min_max_indexes(latitudes_ref,longitudes_ref,
3925                        minlat=None, maxlat=None,
3926                        minlon=None, maxlon=None):
    """
    Return max, min indexes (for slicing) of the lat and long arrays to
    cover the area specified with min/max lat/long.

    Think of the latitudes and longitudes describing a 2d surface.
    The area returned is, if possible, just big enough to cover the
    input max/min area. (This will not be possible if the max/min area
    has a section outside of the latitudes/longitudes area.)

    Assert longitudes are sorted,
    long - from low to high (west to east, eg 148 - 151)
    Assert latitudes are sorted, ascending or descending
    """
3940    latitudes = latitudes_ref[:]
3941    longitudes = longitudes_ref[:]
3942
3943    latitudes = ensure_numeric(latitudes)
3944    longitudes = ensure_numeric(longitudes)
3945
3946    assert allclose(sort(longitudes), longitudes)
3947
3948    #print latitudes[0],longitudes[0]
3949    #print len(latitudes),len(longitudes)
3950    #print latitudes[len(latitudes)-1],longitudes[len(longitudes)-1]
3951   
3952    lat_ascending = True
3953    if not allclose(sort(latitudes), latitudes):
3954        lat_ascending = False
3955        # reverse order of lat, so it's in ascending order         
3956        latitudes = latitudes[::-1]
3957        assert allclose(sort(latitudes), latitudes)
3958    #print "latitudes  in funct", latitudes
3959   
3960    largest_lat_index = len(latitudes)-1
3961    #Cut out a smaller extent.
    if minlat is None:
        lat_min_index = 0
    else:
        lat_min_index = searchsorted(latitudes, minlat)-1
        if lat_min_index < 0:
            lat_min_index = 0

    if maxlat is None:
        lat_max_index = largest_lat_index #len(latitudes)
    else:
        lat_max_index = searchsorted(latitudes, maxlat)
        if lat_max_index > largest_lat_index:
            lat_max_index = largest_lat_index

    if minlon is None:
        lon_min_index = 0
    else:
        lon_min_index = searchsorted(longitudes, minlon)-1
        if lon_min_index < 0:
            lon_min_index = 0

    if maxlon is None:
        lon_max_index = len(longitudes)
    else:
        lon_max_index = searchsorted(longitudes, maxlon)

    # Reverse the indexes if the lat array is descending
    if lat_ascending is False:
        lat_min_index, lat_max_index = largest_lat_index - lat_max_index, \
                                       largest_lat_index - lat_min_index
    lat_max_index = lat_max_index + 1  # taking into account how slicing works
    lon_max_index = lon_max_index + 1  # taking into account how slicing works
3995
3996    return lat_min_index, lat_max_index, lon_min_index, lon_max_index
3997
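# Worked example (illustrative): with latitudes [-21., -20.5, -20.] and
# longitudes [114., 114.5, 115.],
#
#     _get_min_max_indexes(lats, lons, minlat=-20.7, maxlat=-20.2,
#                          minlon=114.2, maxlon=114.8)
#
# returns (0, 3, 0, 3): the -1/+1 adjustments widen the slices so the
# requested area is just covered.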
3998
3999def _read_asc(filename, verbose=False):
    """Read ESRI file from the following ASCII format (.asc)
4001
4002    Example:
4003
4004    ncols         3121
4005    nrows         1800
4006    xllcorner     722000
4007    yllcorner     5893000
4008    cellsize      25
4009    NODATA_value  -9999
4010    138.3698 137.4194 136.5062 135.5558 ..........
4011
4012    """
4013
4014    datafile = open(filename)
4015
4016    if verbose: print 'Reading DEM from %s' %(filename)
4017    lines = datafile.readlines()
4018    datafile.close()
4019
4020    if verbose: print 'Got', len(lines), ' lines'
4021
4022    ncols = int(lines.pop(0).split()[1].strip())
4023    nrows = int(lines.pop(0).split()[1].strip())
4024    xllcorner = float(lines.pop(0).split()[1].strip())
4025    yllcorner = float(lines.pop(0).split()[1].strip())
4026    cellsize = float(lines.pop(0).split()[1].strip())
4027    NODATA_value = float(lines.pop(0).split()[1].strip())
4028
4029    assert len(lines) == nrows
4030
4031    #Store data
4032    grid = []
4033
4034    n = len(lines)
4035    for i, line in enumerate(lines):
4036        cells = line.split()
4037        assert len(cells) == ncols
4038        grid.append(array([float(x) for x in cells]))
4039    grid = array(grid)
4040
4041    return {'xllcorner':xllcorner,
4042            'yllcorner':yllcorner,
4043            'cellsize':cellsize,
4044            'NODATA_value':NODATA_value}, grid
4045
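# Example (illustrative; 'grid.asc' is a hypothetical file in the format above):
#
#     metadata, grid = _read_asc('grid.asc')
#     # metadata['cellsize'], metadata['NODATA_value'], ...
#     # grid.shape == (nrows, ncols)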
4046
4047
4048    ####  URS 2 SWW  ###
4049
4050lon_name = 'LON'
4051lat_name = 'LAT'
4052time_name = 'TIME'
precision = Float # So if we want to change the precision, it's done here
4054class Write_nc:
4055    """
4056    Write an nc file.
4057   
4058    Note, this should be checked to meet cdc netcdf conventions for gridded
4059    data. http://www.cdc.noaa.gov/cdc/conventions/cdc_netcdf_standard.shtml
4060   
4061    """
4062    def __init__(self,
4063                 quantity_name,
4064                 file_name,
4065                 time_step_count,
4066                 time_step,
4067                 lon,
4068                 lat):
4069        """
4070        time_step_count is the number of time steps.
4071        time_step is the time step size
4072       
        pre-condition: quantity_name must be 'HA', 'UA' or 'VA'.
4074        """
4075        self.quantity_name = quantity_name
4076        quantity_units = {'HA':'CENTIMETERS',
4077                              'UA':'CENTIMETERS/SECOND',
4078                              'VA':'CENTIMETERS/SECOND'}       
4079       
        multiplier_dic = {'HA': 100.0,   # To convert from m to cm
                          'UA': 100.0,   # m/s to cm/sec
                          'VA': -100.0}  # MUX files have positive x in the
        # southern direction.  This corrects for it when writing nc files.
4084       
4085        self.quantity_multiplier =  multiplier_dic[self.quantity_name]
4086       
4087        #self.file_name = file_name
4088        self.time_step_count = time_step_count
4089        self.time_step = time_step
4090
4091        # NetCDF file definition
4092        self.outfile = NetCDFFile(file_name, 'w')
4093        outfile = self.outfile       
4094
4095        #Create new file
4096        nc_lon_lat_header(outfile, lon, lat)
4097   
4098        # TIME
4099        outfile.createDimension(time_name, None)
4100        outfile.createVariable(time_name, precision, (time_name,))
4101
4102        #QUANTITY
4103        outfile.createVariable(self.quantity_name, precision,
4104                               (time_name, lat_name, lon_name))
4105        outfile.variables[self.quantity_name].missing_value=-1.e+034
4106        outfile.variables[self.quantity_name].units= \
4107                                 quantity_units[self.quantity_name]
4108        outfile.variables[lon_name][:]= ensure_numeric(lon)
4109        outfile.variables[lat_name][:]= ensure_numeric(lat)
4110
        #Assume no one will want to read this file while we are writing it
4112        #outfile.close()
4113       
4114    def store_timestep(self, quantity_slice):
4115        """
4116        Write a time slice of quantity info
4117        quantity_slice is the data to be stored at this time step
4118        """
4119       
4120        outfile = self.outfile
4121       
4122        # Get the variables
4123        time = outfile.variables[time_name]
4124        quantity = outfile.variables[self.quantity_name]
4125           
4126        i = len(time)
4127
4128        #Store time
4129        time[i] = i*self.time_step #self.domain.time
4130        quantity[i,:] = quantity_slice* self.quantity_multiplier
4131       
4132    def close(self):
4133        self.outfile.close()
4134
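# Example (illustrative sketch; the 2x2 Numeric arrays ha0 and ha1 are
# hypothetical wave heights in metres, one row per latitude):
#
#     nc = Write_nc('HA', 'o_ha.nc', time_step_count=2, time_step=60.0,
#                   lon=[114.0, 114.5], lat=[-21.0, -20.5])
#     nc.store_timestep(ha0)    # stored as centimetres (x100)
#     nc.store_timestep(ha1)
#     nc.close()
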
4135def urs2sww(basename_in='o', basename_out=None, verbose=False,
4136            remove_nc_files=True,
4137            minlat=None, maxlat=None,
4138            minlon= None, maxlon=None,
4139            mint=None, maxt=None,
4140            mean_stage=0,
4141            origin = None,
4142            zscale=1,
4143            fail_on_NaN=True,
4144            NaN_filler=0,
4145            elevation=None):
    """
    Convert URS C binary format for wave propagation to
    sww format native to abstract_2d_finite_volumes.

    Specify only basename_in and read files of the form
    basefilename-z-mux, basefilename-e-mux and
    basefilename-n-mux containing relative height,
    x-velocity and y-velocity, respectively.

    Also convert latitude and longitude to UTM. All coordinates are
    assumed to be given in the GDA94 datum. The latitude and longitude
    information is for a grid.

    min's and max's: If omitted - full extent is used.
    To include a value min may equal it, while max must exceed it.
    Lat and lon are assumed to be in decimal degrees.
    NOTE: minlon is the most eastern boundary.

    origin is a 3-tuple with geo referenced
    UTM coordinates (zone, easting, northing)
    It will be the origin of the sww file. This shouldn't be used,
    since all of anuga should be able to handle an arbitrary origin.

    URS C binary format has data organised as TIME, LONGITUDE, LATITUDE
    which means that latitude is the fastest
    varying dimension (row major order, so to speak)

    In URS C binary the latitudes and longitudes are in ascending order.
    """
    if basename_out is None:
4177        basename_out = basename_in
4178    files_out = urs2nc(basename_in, basename_out)
4179    ferret2sww(basename_out,
4180               minlat=minlat,
4181               maxlat=maxlat,
4182               minlon=minlon,
4183               maxlon=maxlon,
4184               mint=mint,
4185               maxt=maxt,
4186               mean_stage=mean_stage,
4187               origin=origin,
4188               zscale=zscale,
4189               fail_on_NaN=fail_on_NaN,
4190               NaN_filler=NaN_filler,
4191               inverted_bathymetry=True,
4192               verbose=verbose)
4193    #print "files_out",files_out
4194    if remove_nc_files:
4195        for file_out in files_out:
4196            os.remove(file_out)
4197   
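# Example (illustrative sketch; mux files o-z-mux, o-e-mux and o-n-mux are
# assumed to exist in the working directory):
#
#     urs2sww(basename_in='o', basename_out='boundary',
#             minlat=-21.0, maxlat=-20.0, minlon=114.0, maxlon=115.0,
#             verbose=True)
#     # -> writes boundary.sww via intermediate .nc files
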
4198def urs2nc(basename_in = 'o', basename_out = 'urs'):
    """
    Convert the 3 urs files to 4 nc files.

    The names of the urs files must be:
    [basename_in]-z-mux
    [basename_in]-e-mux
    [basename_in]-n-mux
    """
4208   
4209    files_in = [basename_in + WAVEHEIGHT_MUX_LABEL,
4210                basename_in + EAST_VELOCITY_LABEL,
4211                basename_in + NORTH_VELOCITY_LABEL]
4212    files_out = [basename_out+'_ha.nc',
4213                 basename_out+'_ua.nc',
4214                 basename_out+'_va.nc']
4215    quantities = ['HA','UA','VA']
4216
4217    #if os.access(files_in[0]+'.mux', os.F_OK) == 0 :
4218    for i, file_name in enumerate(files_in):
4219        if os.access(file_name, os.F_OK) == 0:
4220            if os.access(file_name+'.mux', os.F_OK) == 0 :
4221                msg = 'File %s does not exist or is not accessible' %file_name
4222                raise IOError, msg
4223            else:
4224               files_in[i] += '.mux'
4225               print "file_name", file_name
4226    hashed_elevation = None
4227    for file_in, file_out, quantity in map(None, files_in,
4228                                           files_out,
4229                                           quantities):
4230        lonlatdep, lon, lat, depth = _binary_c2nc(file_in,
4231                                         file_out,
4232                                         quantity)
4233        #print "lonlatdep", lonlatdep
        if hashed_elevation is None:
4235            elevation_file = basename_out+'_e.nc'
4236            write_elevation_nc(elevation_file,
4237                                lon,
4238                                lat,
4239                                depth)
4240            hashed_elevation = myhash(lonlatdep)
4241        else:
4242            msg = "The elevation information in the mux files is inconsistent"
4243            assert hashed_elevation == myhash(lonlatdep), msg
4244    files_out.append(elevation_file)
4245    return files_out
4246   
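# Example (illustrative; input mux files are hypothetical):
#
#     files = urs2nc(basename_in='o', basename_out='urs')
#     # files == ['urs_ha.nc', 'urs_ua.nc', 'urs_va.nc', 'urs_e.nc']
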
4247def _binary_c2nc(file_in, file_out, quantity):
4248    """
4249    Reads in a quantity urs file and writes a quantity nc file.
4250    additionally, returns the depth and lat, long info,
4251    so it can be written to a file.
4252    """
4253    columns = 3 # long, lat , depth
4254    mux_file = open(file_in, 'rb')
4255   
4256    # Number of points/stations
4257    (points_num,)= unpack('i',mux_file.read(4))
4258
4259    # nt, int - Number of time steps
4260    (time_step_count,)= unpack('i',mux_file.read(4))
4261
4262    #dt, float - time step, seconds
4263    (time_step,) = unpack('f', mux_file.read(4))
4264   
4265    msg = "Bad data in the mux file."
4266    if points_num < 0:
4267        mux_file.close()
4268        raise ANUGAError, msg
4269    if time_step_count < 0:
4270        mux_file.close()
4271        raise ANUGAError, msg
4272    if time_step < 0:
4273        mux_file.close()
4274        raise ANUGAError, msg
4275   
4276    lonlatdep = p_array.array('f')
4277    lonlatdep.read(mux_file, columns * points_num)
4278    lonlatdep = array(lonlatdep, typecode=Float)   
4279    lonlatdep = reshape(lonlatdep, (points_num, columns))
4280   
4281    lon, lat, depth = lon_lat2grid(lonlatdep)
4282    lon_sorted = list(lon)
4283    lon_sorted.sort()
4284
    if list(lon) != lon_sorted:
4286        msg = "Longitudes in mux file are not in ascending order"
4287        raise IOError, msg
4288    lat_sorted = list(lat)
4289    lat_sorted.sort()
4290
    if list(lat) != lat_sorted:
        msg = "Latitudes in mux file are not in ascending order"
        raise IOError, msg

4294    nc_file = Write_nc(quantity,
4295                       file_out,
4296                       time_step_count,
4297                       time_step,
4298                       lon,
4299                       lat)
4300
4301    for i in range(time_step_count):
4302        #Read in a time slice  from mux file 
4303        hz_p_array = p_array.array('f')
4304        hz_p_array.read(mux_file, points_num)
4305        hz_p = array(hz_p_array, typecode=Float)
4306        hz_p = reshape(hz_p, (len(lon), len(lat)))
4307        hz_p = transpose(hz_p) #mux has lat varying fastest, nc has long v.f.
4308
4309        #write time slice to nc file
4310        nc_file.store_timestep(hz_p)
4311    mux_file.close()
4312    nc_file.close()
4313
4314    return lonlatdep, lon, lat, depth
4315   
4316
4317def write_elevation_nc(file_out, lon, lat, depth_vector):
4318    """
4319    Write an nc elevation file.
4320    """
4321   
4322    # NetCDF file definition
4323    outfile = NetCDFFile(file_out, 'w')
4324
4325    #Create new file
4326    nc_lon_lat_header(outfile, lon, lat)
4327   
4328    # ELEVATION
4329    zname = 'ELEVATION'
4330    outfile.createVariable(zname, precision, (lat_name, lon_name))
4331    outfile.variables[zname].units='CENTIMETERS'
4332    outfile.variables[zname].missing_value=-1.e+034
4333
4334    outfile.variables[lon_name][:]= ensure_numeric(lon)
4335    outfile.variables[lat_name][:]= ensure_numeric(lat)
4336
4337    depth = reshape(depth_vector, ( len(lat), len(lon)))
4338    outfile.variables[zname][:]= depth
4339   
4340    outfile.close()
4341   
4342def nc_lon_lat_header(outfile, lon, lat):
4343    """
4344    outfile is the netcdf file handle.
4345    lon - a list/array of the longitudes
4346    lat - a list/array of the latitudes
4347    """
4348   
4349    outfile.institution = 'Geoscience Australia'
4350    outfile.description = 'Converted from URS binary C'
4351   
4352    # Longitude
4353    outfile.createDimension(lon_name, len(lon))
4354    outfile.createVariable(lon_name, precision, (lon_name,))
4355    outfile.variables[lon_name].point_spacing='uneven'
4356    outfile.variables[lon_name].units='degrees_east'
4357    outfile.variables[lon_name].assignValue(lon)
4358
4359
4360    # Latitude
4361    outfile.createDimension(lat_name, len(lat))
4362    outfile.createVariable(lat_name, precision, (lat_name,))
4363    outfile.variables[lat_name].point_spacing='uneven'
4364    outfile.variables[lat_name].units='degrees_north'
4365    outfile.variables[lat_name].assignValue(lat)
4366
4367
4368   
4369def lon_lat2grid(long_lat_dep):
    """
    Given a list of points that are assumed to be on a grid,
    return the longs and lats of the grid.
    long_lat_dep is an array where each row is a position.
    The first column is longitudes.
    The second column is latitudes.

    The latitude is the fastest varying dimension - in mux files.
    """
4379    LONG = 0
4380    LAT = 1
4381    QUANTITY = 2
4382
4383    long_lat_dep = ensure_numeric(long_lat_dep, Float)
4384   
4385    num_points = long_lat_dep.shape[0]
4386    this_rows_long = long_lat_dep[0,LONG]
4387
    # Count the length of unique latitudes
    i = 0
    while i < num_points and long_lat_dep[i,LONG] == this_rows_long:
        i += 1
    # Determine the lats and longs from the grid
    lat = long_lat_dep[:i, LAT]
    long = long_lat_dep[::i, LONG]
4395   
4396    lenlong = len(long)
4397    lenlat = len(lat)
4398    #print 'len lat', lat, len(lat)
4399    #print 'len long', long, len(long)
4400         
4401    msg = 'Input data is not gridded'     
4402    assert num_points % lenlat == 0, msg
4403    assert num_points % lenlong == 0, msg
4404         
4405    # Test that data is gridded       
4406    for i in range(lenlong):
4407        msg = 'Data is not gridded.  It must be for this operation'
4408        first = i*lenlat
4409        last = first + lenlat
4410               
4411        assert allclose(long_lat_dep[first:last,LAT], lat), msg
4412        assert allclose(long_lat_dep[first:last,LONG], long[i]), msg
4413   
4414   
4415#    print 'range long', min(long), max(long)
4416#    print 'range lat', min(lat), max(lat)
4417#    print 'ref long', min(long_lat_dep[:,0]), max(long_lat_dep[:,0])
4418#    print 'ref lat', min(long_lat_dep[:,1]), max(long_lat_dep[:,1])
4419   
4420   
4421   
4422    msg = 'Out of range latitudes/longitudes'
    for l in lat: assert -90 < l < 90, msg
    for l in long: assert -180 < l < 180, msg
4425
4426    # Changing quantity from lat being the fastest varying dimension to
4427    # long being the fastest varying dimension
4428    # FIXME - make this faster/do this a better way
4429    # use numeric transpose, after reshaping the quantity vector
4430#    quantity = zeros(len(long_lat_dep), Float)
4431    quantity = zeros(num_points, Float)
4432   
4433#    print 'num',num_points
4434    for lat_i, _ in enumerate(lat):
4435        for long_i, _ in enumerate(long):
4436            q_index = lat_i*lenlong+long_i
4437            lld_index = long_i*lenlat+lat_i
4438#            print 'lat_i', lat_i, 'long_i',long_i, 'q_index', q_index, 'lld_index', lld_index
4439            temp = long_lat_dep[lld_index, QUANTITY]
4440            quantity[q_index] = temp
4441           
4442    return long, lat, quantity
4443
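# Worked example (illustrative), with latitude varying fastest in the input:
#
#     lld = [[114., -21., 10.], [114., -20., 11.],
#            [115., -21., 12.], [115., -20., 13.]]
#     long, lat, quantity = lon_lat2grid(lld)
#     # long == [114., 115.], lat == [-21., -20.]
#     # quantity reordered with longitude fastest: [10., 12., 11., 13.]
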
4444####  END URS 2 SWW  ###
4445
4446#### URS UNGRIDDED 2 SWW ###
4447
4448### PRODUCING THE POINTS NEEDED FILE ###
4449
4450# Ones used for FESA 2007 results
4451#LL_LAT = -50.0
4452#LL_LONG = 80.0
4453#GRID_SPACING = 1.0/60.0
4454#LAT_AMOUNT = 4800
4455#LONG_AMOUNT = 3600
4456
4457def URS_points_needed_to_file(file_name, boundary_polygon, zone,
4458                              ll_lat, ll_long,
4459                              grid_spacing, 
4460                              lat_amount, long_amount,
4461                              isSouthernHemisphere=True,
4462                              export_csv=False, use_cache=False,
4463                              verbose=False):
    """
    Given the info to replicate the URS grid and a polygon, output
    a file that specifies the cloud of boundary points for URS.

    This creates a .urs file.  This is in the format used by URS;
    1st line is the number of points,
    each line after represents a point, in lats and longs.

    Note: The polygon cannot cross zones or hemispheres.

    A workaround for different zones or hemispheres is to run this twice,
    once for each zone, and then combine the output.

    file_name - name of the urs file produced for David.
    boundary_polygon - a list of points that describes a polygon.
                      The last point is assumed to join the first point.
                      This is in UTM (lat long would be better though)

    This is info about the URS model that needs to be inputted.

    ll_lat - lower left latitude, in decimal degrees
    ll_long - lower left longitude, in decimal degrees
    grid_spacing - in decimal degrees
    lat_amount - number of latitudes
    long_amount - number of longs

    Don't add the file extension.  It will be added.
    """
4493    geo = URS_points_needed(boundary_polygon, zone, ll_lat, ll_long,
4494                            grid_spacing, 
4495                            lat_amount, long_amount, isSouthernHemisphere,
4496                            use_cache, verbose)
4497    if not file_name[-4:] == ".urs":
4498        file_name += ".urs"
4499    geo.export_points_file(file_name, isSouthHemisphere=isSouthernHemisphere)
4500    if export_csv:
4501        if file_name[-4:] == ".urs":
4502            file_name = file_name[:-4] + ".csv"
4503        geo.export_points_file(file_name)
4504    return geo
4505
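# Example (illustrative sketch; the UTM polygon and zone are hypothetical,
# grid parameters follow the FESA 2007 values noted above):
#
#     polygon = [[464000., 7656000.], [496000., 7656000.],
#                [496000., 7688000.], [464000., 7688000.]]
#     URS_points_needed_to_file('boundary_points', polygon, 50,
#                               ll_lat=-50.0, ll_long=80.0,
#                               grid_spacing=1.0/60.0,
#                               lat_amount=4800, long_amount=3600)
#     # -> writes boundary_points.urs
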
4506def URS_points_needed(boundary_polygon, zone, ll_lat,
4507                      ll_long, grid_spacing, 
4508                      lat_amount, long_amount, isSouthHemisphere=True,
4509                      use_cache=False, verbose=False):
4510    args = (boundary_polygon,
4511            zone, ll_lat,
4512            ll_long, grid_spacing, 
4513            lat_amount, long_amount, isSouthHemisphere)
4514    kwargs = {} 
4515    if use_cache is True:
4516        try:
4517            from anuga.caching import cache
        except ImportError:
            msg = 'Caching was requested, but caching module '+\
                  'could not be imported'
            raise ImportError, msg
4522
4523
4524        geo = cache(_URS_points_needed,
4525                  args, kwargs,
4526                  verbose=verbose,
4527                  compression=False)
4528    else:
4529        geo = apply(_URS_points_needed, args, kwargs)
4530
4531    return geo
4532
4533def _URS_points_needed(boundary_polygon,
4534                      zone, ll_lat,
4535                      ll_long, grid_spacing, 
4536                      lat_amount, long_amount,
4537                       isSouthHemisphere):
    """
    boundary_polygon - a list of points that describes a polygon.
                      The last point is assumed to join the first point.
                      This is in UTM (lat long would be better though)

    ll_lat - lower left latitude, in decimal degrees
    ll_long - lower left longitude, in decimal degrees
    grid_spacing - in decimal degrees

    """
4548   
4549    from sets import ImmutableSet
4550   
    msg = "grid_spacing cannot be zero"
    assert grid_spacing != 0, msg
4553    a = boundary_polygon
4554    # List of segments.  Each segment is two points.
4555    segs = [i and [a[i-1], a[i]] or [a[len(a)-1], a[0]] for i in range(len(a))]
4556    # convert the segs to Lat's and longs.
4557   
4558    # Don't assume the zone of the segments is the same as the lower left
4559    # corner of the lat long data!!  They can easily be in different zones
4560   
4561    lat_long_set = ImmutableSet()
4562    for seg in segs:
4563        points_lat_long = points_needed(seg, ll_lat, ll_long, grid_spacing, 
4564                      lat_amount, long_amount, zone, isSouthHemisphere)
4565        lat_long_set |= ImmutableSet(points_lat_long)
4566    if lat_long_set == ImmutableSet([]):
        msg = """URS region specified and polygon do not overlap."""
4568        raise ValueError, msg
4569
4570    # Warning there is no info in geospatial saying the hemisphere of
4571    # these points.  There should be.
4572    geo = Geospatial_data(data_points=list(lat_long_set),
4573                              points_are_lats_longs=True)
4574    return geo
4575   
4576def points_needed(seg, ll_lat, ll_long, grid_spacing, 
4577                  lat_amount, long_amount, zone,
4578                  isSouthHemisphere):
4579    """
4580    seg is two points, in UTM
4581    return a list of the points, in lats and longs that are needed to
4582    interpolate any point on the segment.
4583    """
4584    from math import sqrt
4585    #print "zone",zone
4586    geo_reference = Geo_reference(zone=zone)
4587    #print "seg", seg
4588    geo = Geospatial_data(seg,geo_reference=geo_reference)
4589    seg_lat_long = geo.get_data_points(as_lat_long=True,
4590                                       isSouthHemisphere=isSouthHemisphere)
4591    #print "seg_lat_long", seg_lat_long
4592    # 1.415 = 2^0.5, rounded up....
4593    sqrt_2_rounded_up = 1.415
4594    buffer = sqrt_2_rounded_up * grid_spacing
4595   
4596    max_lat = max(seg_lat_long[0][0], seg_lat_long[1][0]) + buffer
4597    max_long = max(seg_lat_long[0][1], seg_lat_long[1][1]) + buffer
4598    min_lat = min(seg_lat_long[0][0], seg_lat_long[1][0]) - buffer
4599    min_long = min(seg_lat_long[0][1], seg_lat_long[1][1]) - buffer
4600
4601    #print "min_long", min_long
4602    #print "ll_long", ll_long
4603    #print "grid_spacing", grid_spacing
4604    first_row = (min_long - ll_long)/grid_spacing
4605    # To round up
4606    first_row_long = int(round(first_row + 0.5))
4607    #print "first_row", first_row_long
4608
4609    last_row = (max_long - ll_long)/grid_spacing # round down
4610    last_row_long = int(round(last_row))
    #print "last_row", last_row_long
4612   
4613    first_row = (min_lat - ll_lat)/grid_spacing
4614    # To round up
4615    first_row_lat = int(round(first_row + 0.5))
4616    #print "first_row", first_row_lat
4617
4618    last_row = (max_lat - ll_lat)/grid_spacing # round down
4619    last_row_lat = int(round(last_row))
4620    #print "last_row",last_row_lat
4621
    # to work out the max distance -
    # 111120 m is the horizontal distance spanned by 1 degree of latitude
    #max_distance = sqrt_2_rounded_up * 111120 * grid_spacing
    max_distance = 157147.4112 * grid_spacing
4626    #print "max_distance", max_distance #2619.12 m for 1 minute
4627    points_lat_long = []
4628    # Create a list of the lat long points to include.
4629    for index_lat in range(first_row_lat, last_row_lat + 1):
4630        for index_long in range(first_row_long, last_row_long + 1):
4631            #print "index_lat", index_lat
4632            #print "index_long", index_long
4633            lat = ll_lat + index_lat*grid_spacing
4634            long = ll_long + index_long*grid_spacing
4635
4636            #filter here to keep good points
4637            if keep_point(lat, long, seg, max_distance):
4638                points_lat_long.append((lat, long)) #must be hashable
4639    #print "points_lat_long", points_lat_long
4640
    # Points too far from the segment have already been filtered out above
    return points_lat_long
4643
4644def keep_point(lat, long, seg, max_distance):
4645    """
4646    seg is two points, UTM
4647    """
4648    from math import sqrt
4649    _ , x0, y0 = redfearn(lat, long)
4650    x1 = seg[0][0]
4651    y1 = seg[0][1]
4652    x2 = seg[1][0]
4653    y2 = seg[1][1]
4654    x2_1 = x2-x1
4655    y2_1 = y2-y1
4656    try:
4657        d = abs((x2_1)*(y1-y0)-(x1-x0)*(y2_1))/sqrt( \
4658            (x2_1)*(x2_1)+(y2_1)*(y2_1))
4659    except ZeroDivisionError:
4660        #print "seg", seg
4661        #print "x0", x0
4662        #print "y0", y0
4663        if sqrt((x2_1)*(x2_1)+(y2_1)*(y2_1)) == 0 and \
4664           abs((x2_1)*(y1-y0)-(x1-x0)*(y2_1)) == 0:
4665            return True
4666        else:
4667            return False
4668   
4669    if d <= max_distance:
4670        return True
4671    else:
4672        return False
4673   
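# Note (illustrative): keep_point computes the perpendicular distance from
# the (lat, long) point, converted to UTM via redfearn, to the infinite
# line through seg using |(x2-x1)(y1-y0) - (x1-x0)(y2-y1)| / |seg|, and
# keeps the point if that distance is at most max_distance.
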
4674    #### CONVERTING UNGRIDDED URS DATA TO AN SWW FILE ####
4675   
4676WAVEHEIGHT_MUX_LABEL = '-z-mux'
4677EAST_VELOCITY_LABEL =  '-e-mux'
4678NORTH_VELOCITY_LABEL =  '-n-mux' 
4679def urs_ungridded2sww(basename_in='o', basename_out=None, verbose=False,
4680                      mint=None, maxt=None,
4681                      mean_stage=0,
4682                      origin=None,
4683                      hole_points_UTM=None,
4684                      zscale=1):
4685    """   
4686    Convert URS C binary format for wave propagation to
4687    sww format native to abstract_2d_finite_volumes.
4688
4689
4690    Specify only basename_in and read files of the form
4691    basefilename-z-mux, basefilename-e-mux and
4692    basefilename-n-mux containing relative height,
4693    x-velocity and y-velocity, respectively.
4694
    Also convert latitude and longitude to UTM. All coordinates are
    assumed to be given in the GDA94 datum. The latitude and longitude
    information is assumed to be ungridded.

    min's and max's: If omitted - full extent is used.
    To include a value, min and max may equal it.
    Lat and lon are assumed to be in decimal degrees.

    origin is a 3-tuple with geo referenced
    UTM coordinates (zone, easting, northing)
    It will be the origin of the sww file. This shouldn't be used,
    since all of anuga should be able to handle an arbitrary origin.
    The mux point info is NOT relative to this origin.


    URS C binary format has data organised as TIME, LONGITUDE, LATITUDE
    which means that latitude is the fastest
    varying dimension (row major order, so to speak)

    In URS C binary the latitudes and longitudes are in ascending order.
4715
4716    Note, interpolations of the resulting sww file will be different
4717    from results of urs2sww.  This is due to the interpolation
4718    function used, and the different grid structure between urs2sww
4719    and this function.
4720   
4721    Interpolating data that has an underlying gridded source can
4722    easily end up with different values, depending on the underlying
4723    mesh.
4724
4725    consider these 4 points
4726    50  -50
4727
4728    0     0
4729
4730    The grid can be
4731     -
4732    |\|    A
4733     -
4734     or;
4735      -
4736     |/|   B
4737      -
4738      If a point is just below the center of the midpoint, it will have a
4739      +ve value in grid A and a -ve value in grid B.
4740    """ 
4741    from anuga.mesh_engine.mesh_engine import NoTrianglesError
4742    from anuga.pmesh.mesh import Mesh
4743
4744    files_in = [basename_in + WAVEHEIGHT_MUX_LABEL,
4745                basename_in + EAST_VELOCITY_LABEL,
4746                basename_in + NORTH_VELOCITY_LABEL]
4747    quantities = ['HA','UA','VA']
4748
4749    # Instantiate Urs_points for each of the three mux files.
4750    mux = {}
4751    for quantity, file in map(None, quantities, files_in):
4752        mux[quantity] = Urs_points(file)
4753       
4754    # Could check that the depth is the same. (hashing)
4755
4756    # handle to a mux file to do depth stuff
4757    a_mux = mux[quantities[0]]
4758   
4759    # Convert to utm
4760    lat = a_mux.lonlatdep[:,1]
4761    long = a_mux.lonlatdep[:,0]
4762    points_utm, zone = convert_from_latlon_to_utm( \
4763        latitudes=lat, longitudes=long)
4764    #print "points_utm", points_utm
4765    #print "zone", zone
4766
4767    elevation = a_mux.lonlatdep[:,2] * -1  # Convert depth to elevation
4768   
4769    # grid ( create a mesh from the selected points)
4770    # This mesh has a problem.  Triangles are stretched over ungridded areas.
4771    # If these areas could be described as holes in pmesh, that would be great.
4772
4773    # I can't just get the user to select a point in the middle.
4774    # A boundary is needed around these points.
4775    # But if the zone of points is obvious enough auto-segment should do
4776    # a good boundary.
4777    mesh = Mesh()
4778    mesh.add_vertices(points_utm)
4779    mesh.auto_segment(smooth_indents=True, expand_pinch=True)
4780    # To try and avoid alpha shape 'hugging' too much
4781    mesh.auto_segment( mesh.shape.get_alpha()*1.1 )
4782    if hole_points_UTM is not None:
4783        point = ensure_absolute(hole_points_UTM)
4784        mesh.add_hole(point[0], point[1])
4785    try:
4786        mesh.generate_mesh(minimum_triangle_angle=0.0, verbose=False)
4787    except NoTrianglesError:
4788        # This is a bit of a hack, going in and changing the
4789        # data structure.
4790        mesh.holes = []
4791        mesh.generate_mesh(minimum_triangle_angle=0.0, verbose=False)
4792    mesh_dic = mesh.Mesh2MeshList()
4793
4794    #mesh.export_mesh_file(basename_in + '_168.tsh')
4795    #import sys; sys.exit()
4796    # These are the times of the mux file
4797    mux_times = []
4798    for i in range(a_mux.time_step_count):
4799        mux_times.append(a_mux.time_step * i) 
4800    mux_times_start_i, mux_times_fin_i = mux2sww_time(mux_times, mint, maxt)
4801    times = mux_times[mux_times_start_i:mux_times_fin_i]
4802   
4803    if mux_times_start_i == mux_times_fin_i:
4804        # Close the mux files
4805        for quantity, file in map(None, quantities, files_in):
4806            mux[quantity].close()
4807        msg="Due to mint and maxt there's no time info in the boundary SWW."
4808        raise Exception, msg
4809       
4810    # If this raise is removed there are currently no downstream errors
4811           
4812    points_utm=ensure_numeric(points_utm)
4813    assert allclose(ensure_numeric(mesh_dic['generatedpointlist']),
4814                    ensure_numeric(points_utm))
4815   
4816    volumes = mesh_dic['generatedtrianglelist']
4817   
4818    # write sww intro and grid stuff.   
4819    if basename_out is None:
4820        swwname = basename_in + '.sww'
4821    else:
4822        swwname = basename_out + '.sww'
4823
4824    if verbose: print 'Output to ', swwname
4825    outfile = NetCDFFile(swwname, 'w')
4826    # For a different way of doing this, check out tsh2sww
4827    # work out sww_times and the index range this covers
4828    sww = Write_sww()
4829    sww.store_header(outfile, times, len(volumes), len(points_utm),
4830                     verbose=verbose,sww_precision=Float)
4831    outfile.mean_stage = mean_stage
4832    outfile.zscale = zscale
4833
4834    sww.store_triangulation(outfile, points_utm, volumes,
4835                            elevation, zone,  new_origin=origin,
4836                            verbose=verbose)
4837   
4838    if verbose: print 'Converting quantities'
4839    j = 0
4840    # Read in a time slice from each mux file and write it to the sww file
4841    for ha, ua, va in map(None, mux['HA'], mux['UA'], mux['VA']):
4842        if j >= mux_times_start_i and j < mux_times_fin_i:
4843            stage = zscale*ha + mean_stage
4844            h = stage - elevation
4845            xmomentum = ua*h
4846            ymomentum = -1*va*h # -1 since in mux files south is positive.
4847            sww.store_quantities(outfile, 
4848                                 slice_index=j - mux_times_start_i,
4849                                 verbose=verbose,
4850                                 stage=stage,
4851                                 xmomentum=xmomentum,
4852                                 ymomentum=ymomentum,
4853                                 sww_precision=Float)
4854        j += 1
4855    if verbose: sww.verbose_quantities(outfile)
4856    outfile.close()
4859
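# Hedged usage sketch (added for illustration; not in the original source):
# converting a set of ungridded URS mux files to an sww file.  The basename
# 'fremantle' and the hole point below are hypothetical values.
def _example_urs_ungridded2sww():
    urs_ungridded2sww(basename_in='fremantle',    # reads fremantle-z-mux,
                      basename_out='fremantle',   # fremantle-e-mux and
                      mint=0.0, maxt=3600.0,      # fremantle-n-mux
                      mean_stage=0.0,
                      hole_points_UTM=[382000.0, 6450000.0],
                      verbose=True)
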
4860    ##################################
4861    # READ MUX2 FILES line of points #
4862    ##################################
4863
4864WAVEHEIGHT_MUX2_LABEL = '-z-mux2'
4865EAST_VELOCITY_MUX2_LABEL =  '-e-mux2'
4866NORTH_VELOCITY_MUX2_LABEL =  '-n-mux2'   
4867
4868def read_mux2_py(filenames,
4869                 weights,
4870                 permutation=None,
4871                 verbose=False):
4872    """Access the mux files specified in the filenames list. Combine the
4873       data found therin as a weighted linear sum as specifed by the weights.
4874       If permutation is None or empty extract timeseries data for all gauges within the
4875       files.
4876         
4877       Input:
4878           filenames:   List of filenames specifiying the file containing the
4879                        timeseries data (mux2 format) for each source
4880           weights:     Weights associated with each source
4881           permutation: Specifies the gauge numbers that for which data is to be
4882                        extracted
4883    """ 
4884
4885    from Numeric import ones,Float,compress,zeros,arange
4886    from urs_ext import read_mux2
4887
4888    numSrc=len(filenames)
4889
4890    file_params=-1*ones(3,Float)#[nsta,dt,nt]
4891   
4892    # Convert verbose to int C flag
4893    if verbose:
4894        verbose=1
4895    else:
4896        verbose=0
4897       
4898       
4899    #msg = 'I got the permutation vector:' + str(permutation)
4900    #assert permutation is not None, msg
4901    if permutation is None:
4902        permutation = ensure_numeric([], Float)   
4903
4904    # Call underlying C implementation urs2sts_ext.c   
4905    data=read_mux2(numSrc,filenames,weights,file_params,permutation,verbose)
4906
4907    msg='File parameter values were not read in correctly from C file'
4908    assert len(compress(file_params>0,file_params))!=0,msg
4909   
4910    msg='The number of stations specified in the C array and in the file are inconsistent'
4911    assert file_params[0] >= len(permutation), msg
4912   
4913    msg='The number of stations returned is inconsistent with the requested number'   
4914    assert len(permutation) == 0 or len(permutation) == data.shape[0], msg
4915   
4916    nsta=int(file_params[0])
4917    msg='Must have at least one station'
4918    assert nsta>0,msg
4919   
4920    dt=file_params[1]
4921    msg='Must have a positive timestep'
4922    assert dt>0,msg
4923   
4924    nt=int(file_params[2])
4925    msg='Must have at least one gauge value'
4926    assert nt>0,msg
4927   
4928    OFFSET=5 # Number of site parameters p passed back with data
4929             # p=[geolat,geolon,depth,start_tstep,finish_tstep]
4930
4931    parameters_index = data.shape[1]-OFFSET         
4932             
4933    times=dt*arange(parameters_index)   
4934    latitudes=zeros(data.shape[0], Float)
4935    longitudes=zeros(data.shape[0], Float)
4936    elevation=zeros(data.shape[0], Float)
4937    quantity=zeros((data.shape[0], parameters_index), Float)
4938   
4939   
4940    starttime=1e16
4941    for i in range(0, data.shape[0]):
4942        latitudes[i]=data[i][parameters_index]
4943        longitudes[i]=data[i][parameters_index+1]
4944        elevation[i]=-data[i][parameters_index+2]
4945        quantity[i][:]=data[i][:parameters_index] # Was data[i][:-OFFSET]
4946       
4947        starttime=min(dt*data[i][parameters_index+3],starttime)
4948       
4949    return times, latitudes, longitudes, elevation, quantity, starttime
4950   
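# Hedged sketch (added for illustration; not in the original source): reading
# two mux2 sources with equal weights.  The filenames are hypothetical.
def _example_read_mux2_py():
    from Numeric import ones, Float
    filenames = ['sourceA-z-mux2', 'sourceB-z-mux2']
    weights = ones(len(filenames), Float)/len(filenames)
    times, latitudes, longitudes, elevation, quantity, starttime = \
        read_mux2_py(filenames, weights, permutation=None, verbose=False)
    # quantity has shape (number_of_gauges, number_of_timesteps); the five
    # site parameters [geolat, geolon, depth, start_tstep, finish_tstep]
    # appended by the C routine have already been stripped off.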
4951
4952def mux2sww_time(mux_times, mint, maxt):
4953    """
4954    """
4955
4956    if mint is None:
4957        mux_times_start_i = 0
4958    else:
4959        mux_times_start_i = searchsorted(mux_times, mint)
4960       
4961    if maxt is None:
4962        mux_times_fin_i = len(mux_times)
4963    else:
4964        maxt += 0.5 # so if you specify a time where there is
4965                    # data that time will be included
4966        mux_times_fin_i = searchsorted(mux_times, maxt)
4967
4968    return mux_times_start_i, mux_times_fin_i
4969
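# Sketch of the windowing logic above (added for illustration), using
# made-up times.  Note the 0.5 s padding applied to maxt.
def _example_mux2sww_time():
    mux_times = [0.0, 10.0, 20.0, 30.0, 40.0]
    start_i, fin_i = mux2sww_time(mux_times, mint=10.0, maxt=30.0)
    assert mux_times[start_i:fin_i] == [10.0, 20.0, 30.0]
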
4970
4971def urs2sts(basename_in, basename_out=None, 
4972            weights=None,
4973            verbose=False, 
4974            origin=None,
4975            mean_stage=0.0, 
4976            zscale=1.0,
4977            ordering_filename=None):
4978    """Convert URS mux2 format for wave propagation to sts format
4979
4980    Also convert latitude and longitude to UTM. All coordinates are
4981    assumed to be given in the GDA94 datum
4982
4983    origin is a 3-tuple with geo referenced
4984    UTM coordinates (zone, easting, northing)
4985   
4986    inputs:
4987       
4988    basename_in: list of source file prefixes
4989   
4990        These are combined with the extensions:
4991        WAVEHEIGHT_MUX2_LABEL = '-z-mux2' for stage
4992        EAST_VELOCITY_MUX2_LABEL =  '-e-mux2' xmomentum
4993        NORTH_VELOCITY_MUX2_LABEL =  '-n-mux2' and ymomentum
4994   
4995        to create a 2D list of mux2 files. The rows are associated with each
4996        quantity and must have the above extensions;
4997        the columns are the list of file prefixes.
4998   
4999    ordering_filename: a .txt file name specifying which mux2 gauge points
5000              are to be stored. This is indicated by the index of the gauge
5001              in the ordering file.
5002   
5003              ordering file format:
5004              1st line:    'index,longitude,latitude\n'
5005              other lines: index,longitude,latitude
5006             
5007              If ordering_filename is None or the ordering file is empty
5008              then all points are taken in the order they
5009              appear in the mux2 file.
5010   
5011   
5012    output:
5013      basename_out: name of sts file in which mux2 data is stored.
5014   
5015   
5016    """
5017    import os
5018    from Scientific.IO.NetCDF import NetCDFFile
5019    from Numeric import Float, Int, Int32, searchsorted, zeros, array
5020    from Numeric import allclose, around,ones,Float
5021    from types import ListType,StringType
5022    from operator import __and__
5023   
5024    if not isinstance(basename_in, ListType):
5025        if verbose: print 'Reading single source'
5026        basename_in=[basename_in]
5027
5028    # This is the value used in the mux file format to indicate NAN data
5029    # FIXME (Ole): This should be changed everywhere to IEEE NAN when we upgrade to Numpy
5030    NODATA = 99   
5031
5032    # Check that basename is a list of strings
5033    if not reduce(__and__, map(lambda z:isinstance(z,StringType), basename_in)):
5034        msg= 'basename_in must be a string or list of strings'
5035        raise Exception, msg
5036
5037    # Find the number of sources to be used
5038    numSrc=len(basename_in)
5039
5040    # A weight must be specified for each source
5041    if weights is None:
5042        # Default is equal weighting
5043        weights=ones(numSrc,Float)/numSrc
5044    else:
5045        weights = ensure_numeric(weights)
5046        msg = 'When combining multiple sources, a weight must be specified '+\
5047              'for each mux2 source file'
5048        assert len(weights) == numSrc, msg
5049       
5050    if verbose:
5051        print 'Weights used in urs2sts:', weights
5052
5053    # Check output filename   
5054    if basename_out is None:
5055        msg = 'STS filename must be specified as basename_out'
5056        msg += ' in function urs2sts'
5057        raise Exception, msg
5058   
5059    if basename_out.endswith('.sts'):
5060        stsname = basename_out
5061    else:   
5062        stsname = basename_out + '.sts'       
5063
5064    # Create input filenames from basenames and check their existence
5065    files_in=[[],[],[]]
5066    for files in basename_in:
5067        files_in[0].append(files + WAVEHEIGHT_MUX2_LABEL)
5068        files_in[1].append(files + EAST_VELOCITY_MUX2_LABEL)
5069        files_in[2].append(files + NORTH_VELOCITY_MUX2_LABEL)
5070   
5071    quantities = ['HA','UA','VA'] # Quantity names used in the MUX2 format
5072    for i in range(len(quantities)): 
5073        for file_in in files_in[i]:
5074            if (os.access(file_in, os.F_OK) == 0):
5075                msg = 'File %s does not exist or is not accessible' %file_in
5076                raise IOError, msg
5077
5078    # Establish permutation array
5079    if ordering_filename is not None:
5080
5081        if verbose is True:
5082            print 'Reading ordering file', ordering_filename
5083        # Read ordering file
5084        try:
5085            fid=open(ordering_filename, 'r')
5086            file_header=fid.readline().split(',')
5087            ordering_lines=fid.readlines()
5088            fid.close()
5089        except:
5090            msg = 'Cannot open %s'%ordering_filename
5091            raise Exception, msg
5092
5093        reference_header = 'index, longitude, latitude\n'
5094        reference_header_split = reference_header.split(',')
5095        for i in range(3):
5096            if not file_header[i].strip() == reference_header_split[i].strip():
5097                msg = 'File must contain header: '+reference_header
5098                raise Exception, msg
5099
5100        if len(ordering_lines)<2:
5101            msg = 'File must contain at least two points'
5102            raise Exception, msg
5103
5104        permutation = ensure_numeric([int(line.split(',')[0]) for line in ordering_lines])
5105    else:
5106        permutation = None
5107
5108
5109    #print 'permutation', permutation
5110    # Read MUX2 files
5111    if verbose: print 'Reading mux2 files'
5112    mux={}
5113    for i, quantity in enumerate(quantities):
5114   
5115        # For each quantity read the associated list of source mux2 files
5116        # with the extension associated with that quantity
5117        times,\
5118        latitudes,\
5119        longitudes,\
5120        elevation,\
5121        mux[quantity],\
5122        starttime = read_mux2_py(files_in[i], weights, permutation, verbose=verbose)
5123   
5124        # Check that all quantities have consistent time and space information     
5125        if quantity!=quantities[0]:
5126            msg='%s, %s and %s have inconsistent gauge data'\
5127                %(files_in[0], files_in[1], files_in[2])
5128            assert allclose(times, times_old), msg
5129            assert allclose(latitudes, latitudes_old), msg
5130            assert allclose(longitudes, longitudes_old), msg
5131            assert allclose(elevation, elevation_old), msg
5132            assert allclose(starttime, starttime_old), msg
5133        times_old=times
5134        latitudes_old=latitudes
5135        longitudes_old=longitudes
5136        elevation_old=elevation
5137        starttime_old=starttime
5138
5139
5140       
5141       
5142       
5143        # Self check - can be removed to improve speed
5144
5145        #ref_longitudes = [float(line.split(',')[1]) for line in ordering_lines]               
5146        #ref_latitudes = [float(line.split(',')[2]) for line in ordering_lines]       
5147        #
5148        #msg = 'Longitudes specified in ordering file do not match those found in mux files'
5149        #msg += 'I got %s instead of %s (only beginning shown)'\
5150        #    %(str(longitudes[:10]) + '...',
5151        #      str(ref_longitudes[:10]) + '...')       
5152        #assert allclose(longitudes, ref_longitudes), msg       
5153        #
5154        #msg = 'Latitudes specified in ordering file do not match those found in mux files. '
5155        #msg += 'I got %s instead of %s (only beginning shown)'\
5156        #    %(str(latitudes[:10]) + '...',
5157        #      str(ref_latitudes[:10]) + '...')               
5158        #assert allclose(latitudes, ref_latitudes), msg
5159       
5160               
5161       
5162    # Store timeseries in NetCDF STS file   
5163    msg='File is empty and/or clipped region not in file region'
5164    assert len(latitudes) > 0, msg
5165
5166    number_of_points = latitudes.shape[0]      # Number of stations retrieved
5167    number_of_times = times.shape[0]           # Number of timesteps
5168    number_of_latitudes = latitudes.shape[0]   # Number latitudes
5169    number_of_longitudes = longitudes.shape[0] # Number longitudes
5170
5171    # NetCDF file definition
5172    outfile = NetCDFFile(stsname, 'w')
5173
5174    description = 'Converted from URS mux2 files: %s'\
5175                  %(basename_in)
5176   
5177    # Create new file
5178    sts = Write_sts()
5179
5180    sts.store_header(outfile, 
5181                     times+starttime,
5182                     number_of_points, 
5183                     description=description,
5184                     verbose=verbose,
5185                     sts_precision=Float)
5186   
5187    # Store
5188    from anuga.coordinate_transforms.redfearn import redfearn
5189    x = zeros(number_of_points, Float)  # Easting
5190    y = zeros(number_of_points, Float)  # Northing
5191
5192    # Check zone boundaries
5193    refzone, _, _ = redfearn(latitudes[0],longitudes[0]) 
5194
5195    for i in range(number_of_points):
5196        zone, easting, northing = redfearn(latitudes[i],longitudes[i])
5197        x[i] = easting
5198        y[i] = northing
5199        msg='all sts gauges need to be in the same zone'
5200        assert zone==refzone,msg
5201
5202    if origin is None:
5203        origin = Geo_reference(refzone,min(x),min(y))
5204    geo_ref = write_NetCDF_georeference(origin, outfile)
5205
5206
5207   
5208    #print geo_ref.get_xllcorner()
5209    #print geo_ref.get_yllcorner()
5210
5211    elevation = resize(elevation,outfile.variables['elevation'][:].shape)
5212    outfile.variables['x'][:] = x - geo_ref.get_xllcorner()
5213    outfile.variables['y'][:] = y - geo_ref.get_yllcorner()
5214    outfile.variables['elevation'][:] = elevation
5215
5216    stage = outfile.variables['stage']
5217    xmomentum = outfile.variables['xmomentum']
5218    ymomentum = outfile.variables['ymomentum']
5219
5220    if verbose: print 'Converting quantities'
5221    for j in range(len(times)):
5222        for i in range(number_of_points):
5223            ha = mux['HA'][i,j]
5224            ua = mux['UA'][i,j]
5225            va = mux['VA'][i,j]
5226            if ha == NODATA:
5227                if verbose:
5228                    msg = 'Setting nodata value %d to 0 at time = %f, point = %d'\
5229                          %(ha, times[j], i)
5230                    print msg
5231                ha = 0.0
5232                ua = 0.0
5233                va = 0.0
5234               
5235            w = zscale*ha + mean_stage
5236            h=w-elevation[i]
5237            stage[j,i] = w
5238
5239            xmomentum[j,i] = ua*h
5240            ymomentum[j,i] = va*h
5241
5242    outfile.close()
5243
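# Hedged usage sketch (added for illustration; not in the original source).
# Two hypothetical sources are combined 70/30 and clipped to the gauges
# listed in a hypothetical ordering file of the documented format:
#     index, longitude, latitude
#     0, 115.0, -32.0
#     3, 115.1, -32.1
def _example_urs2sts():
    urs2sts(['sourceA', 'sourceB'],      # reads e.g. sourceA-z-mux2, ...
            basename_out='boundary',     # writes boundary.sts
            weights=[0.7, 0.3],
            ordering_filename='ordering.txt',
            mean_stage=0.0,
            zscale=1.0,
            verbose=True)
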
5244def create_sts_boundary(stsname):
5245    """
5246        Create boundary segments from .sts file. Points can be stored in
5247    arbitrary order within the .sts file. The order in which the .sts points
5248    make up the boundary are given in order.txt file
5249   
5250    FIXME:
5251    Point coordinates are stored in relative eastings and northings.
5252    But boundary is produced in absolute coordinates
5253   
5254    """
5255    try:
5256        fid = NetCDFFile(stsname+'.sts', 'r')
5257    except:
5258        msg = 'Cannot open %s.sts' %stsname
5259        raise IOError, msg
5260
5261    xllcorner = fid.xllcorner[0]
5262    yllcorner = fid.yllcorner[0]
5263    #Points stored in sts file are normalised to [xllcorner,yllcorner] but
5264    #we cannot assume that boundary polygon will be. At least the
5265    #additional points specified by the user after this function is called
5266    x = fid.variables['x'][:]+xllcorner
5267    y = fid.variables['y'][:]+yllcorner
5268
5269    x = reshape(x, (len(x),1))
5270    y = reshape(y, (len(y),1))
5271    sts_points = concatenate((x,y), axis=1)
5272
5273    return sts_points.tolist()
5274
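# Hedged sketch (added for illustration; the filename is hypothetical):
# recover absolute gauge coordinates from an sts file for use as part of
# a boundary polygon.
def _example_create_sts_boundary():
    sts_points = create_sts_boundary('boundary')  # opens boundary.sts
    # sts_points is a list of [easting, northing] pairs in absolute
    # coordinates, in the order in which they are stored in the sts file.
    return sts_points
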
5275class Write_sww:
5276    from anuga.shallow_water.shallow_water_domain import Domain
5277
5278    # FIXME (Ole): Hardwiring the conserved quantities like
5279    # this could be a problem. I would prefer taking them from
5280    # the instantiation of Domain.
5281    #
5282    # (DSG) There is not always a Domain instance when Write_sww is used.
5283    # Check to see if this is the same level of hardwiring as is in
5284    # shallow water domain.
5285   
5286    sww_quantities = Domain.conserved_quantities
5287
5288
5289    RANGE = '_range'
5290    EXTREMA = ':extrema'
5291
5292    def __init__(self):
5293        pass
5294   
5295    def store_header(self,
5296                     outfile,
5297                     times,
5298                     number_of_volumes,
5299                     number_of_points,
5300                     description='Converted from XXX',
5301                     smoothing=True,
5302                     order=1,
5303                     sww_precision=Float32,
5304                     verbose=False):
5305        """
5306        outfile - the name of the file that will be written
5307        times - A list of the time slice times OR a start time
5308        Note, if a list is given the info will be made relative.
5309        number_of_volumes - the number of triangles
5310        """
5311   
5312        outfile.institution = 'Geoscience Australia'
5313        outfile.description = description
5314
5315        # For sww compatibility
5316        if smoothing is True:
5317            # Smoothing to be deprecated
5318            outfile.smoothing = 'Yes'
5319            outfile.vertices_are_stored_uniquely = 'False'
5320        else:
5321            # Smoothing to be deprecated
5322            outfile.smoothing = 'No'
5323            outfile.vertices_are_stored_uniquely = 'True'
5324        outfile.order = order
5325
5326        try:
5327            revision_number = get_revision_number()
5328        except:
5329            revision_number = None
5330        # Allow None to be stored as a string               
5331        outfile.revision_number = str(revision_number) 
5332
5333
5334       
5335        # times - A list or array of the time slice times OR a start time
5336        # times = ensure_numeric(times)
5337        # Start time in seconds since the epoch (midnight 1/1/1970)
5338
5339        # This is being used to separate one number from a list.
5340        # What it is actually doing is distinguishing lists from numeric arrays.
5341        if type(times) is list or type(times) is ArrayType: 
5342            number_of_times = len(times)
5343            times = ensure_numeric(times) 
5344            if number_of_times == 0:
5345                starttime = 0
5346            else:
5347                starttime = times[0]
5348                times = times - starttime  #Store relative times
5349        else:
5350            number_of_times = 0
5351            starttime = times
5352            #times = ensure_numeric([])
5353        outfile.starttime = starttime
5354        # dimension definitions
5355        outfile.createDimension('number_of_volumes', number_of_volumes)
5356        outfile.createDimension('number_of_vertices', 3)
5357        outfile.createDimension('numbers_in_range', 2)
5358   
5359        if smoothing is True:
5360            outfile.createDimension('number_of_points', number_of_points)
5361       
5362            # FIXME(Ole): This will cause sww files for parallel domains to
5363            # have ghost nodes stored (but not used by triangles).
5364            # To clean this up, we have to change get_vertex_values and
5365            # friends in quantity.py (but I can't be bothered right now)
5366        else:
5367            outfile.createDimension('number_of_points', 3*number_of_volumes)
5368        outfile.createDimension('number_of_timesteps', number_of_times)
5369
5370        # variable definitions
5371        outfile.createVariable('x', sww_precision, ('number_of_points',))
5372        outfile.createVariable('y', sww_precision, ('number_of_points',))
5373        outfile.createVariable('elevation', sww_precision, ('number_of_points',))
5374        q = 'elevation'
5375        outfile.createVariable(q+Write_sww.RANGE, sww_precision,
5376                               ('numbers_in_range',))
5377
5378
5379        # Initialise ranges with small and large sentinels.
5380        # If this was in pure Python we could have used None sensibly
5381        outfile.variables[q+Write_sww.RANGE][0] = max_float  # Min
5382        outfile.variables[q+Write_sww.RANGE][1] = -max_float # Max
5383
5384        # FIXME: Backwards compatibility
5385        outfile.createVariable('z', sww_precision, ('number_of_points',))
5386        #################################
5387
5388        outfile.createVariable('volumes', Int, ('number_of_volumes',
5389                                                'number_of_vertices'))
5390        # Doing sww_precision instead of Float gives cast errors.
5391        outfile.createVariable('time', Float,
5392                               ('number_of_timesteps',))
5393       
5394        for q in Write_sww.sww_quantities:
5395            outfile.createVariable(q, sww_precision,
5396                                   ('number_of_timesteps',
5397                                    'number_of_points')) 
5398            outfile.createVariable(q+Write_sww.RANGE, sww_precision,
5399                                   ('numbers_in_range',))
5400
5401            # Initialise ranges with small and large sentinels.
5402            # If this was in pure Python we could have used None sensibly
5403            outfile.variables[q+Write_sww.RANGE][0] = max_float  # Min
5404            outfile.variables[q+Write_sww.RANGE][1] = -max_float # Max
5405           
5406        if type(times) is list or type(times) is ArrayType: 
5407            outfile.variables['time'][:] = times    #Store time relative
5408           
5409        if verbose:
5410            print '------------------------------------------------'
5411            print 'Statistics:'
5412            print '    t in [%f, %f], len(t) == %d'\
5413                  %(min(times.flat), max(times.flat), len(times.flat))
5414
5415       
5416    def store_triangulation(self,
5417                            outfile,
5418                            points_utm,
5419                            volumes,
5420                            elevation, zone=None, new_origin=None, 
5421                            points_georeference=None, verbose=False):
5422        """
5423       
5424        new_origin - qa georeference that the points can be set to. (Maybe
5425        do this before calling this function.)
5426       
5427        points_utm - currently a list or array of the points in UTM.
5428        points_georeference - the georeference of the points_utm
5429       
5430        How about passing new_origin and current_origin.
5431        If you get both, do a convertion from the old to the new.
5432       
5433        If you only get new_origin, the points are absolute,
5434        convert to relative
5435       
5436        if you only get the current_origin the points are relative, store
5437        as relative.
5438       
5439        if you get no georefs create a new georef based on the minimums of
5440        points_utm.  (Another option would be to default to absolute)
5441       
5442        Yes, and this is done in another part of the code.
5443        Probably geospatial.
5444       
5445        If you don't supply either geo_refs, then supply a zone. If not
5446        the default zone will be used.
5447       
5448       
5449        precon
5450       
5451        header has been called.
5452        """
5453       
5454        number_of_points = len(points_utm)   
5455        volumes = array(volumes) 
5456        points_utm = array(points_utm)
5457
5458        # given the two geo_refs and the points, do the stuff
5459        # described in the method header
5460        # if this is needed else where, pull out as a function
5461        points_georeference = ensure_geo_reference(points_georeference)
5462        new_origin = ensure_geo_reference(new_origin)
5463        if new_origin is None and points_georeference is not None:
5464            points = points_utm
5465            geo_ref = points_georeference
5466        else:
5467            if new_origin is None:
5468                new_origin = Geo_reference(zone,min(points_utm[:,0]),
5469                                           min(points_utm[:,1]))
5470            points = new_origin.change_points_geo_ref(points_utm,
5471                                                      points_georeference)
5472            geo_ref = new_origin
5473
5474        # At this stage I need a georef and points
5475        # the points are relative to the georef
5476        geo_ref.write_NetCDF(outfile)
5477   
5478        # This will put the geo ref in the middle
5479        #geo_ref=Geo_reference(refzone,(max(x)+min(x))/2.0,(max(x)+min(y))/2.)
5480       
5481        x =  points[:,0]
5482        y =  points[:,1]
5483        z = outfile.variables['z'][:]
5484   
5485        if verbose:
5486            print '------------------------------------------------'
5487            print 'More Statistics:'
5488            print '  Extent (lat/lon):'
5489            print '    x in [%f, %f], len(lat) == %d'\
5490                  %(min(x), max(x),
5491                    len(x))
5492            print '    y in [%f, %f], len(lon) == %d'\
5493                  %(min(y), max(y),
5494                    len(y))
5495            print '    z in [%f, %f], len(z) == %d'\
5496                  %(min(elevation), max(elevation),
5497                    len(elevation))
5498            print 'geo_ref: ',geo_ref
5499            print '------------------------------------------------'
5500           
5501        #z = resize(bath_grid,outfile.variables['z'][:].shape)
5502        #print "points[:,0]", points[:,0]
5503        outfile.variables['x'][:] = points[:,0] #- geo_ref.get_xllcorner()
5504        outfile.variables['y'][:] = points[:,1] #- geo_ref.get_yllcorner()
5505        outfile.variables['z'][:] = elevation
5506        outfile.variables['elevation'][:] = elevation  #FIXME HACK
5507        outfile.variables['volumes'][:] = volumes.astype(Int32) #On Opteron 64
5508
5509        q = 'elevation'
5510        # This updates the _range values
5511        outfile.variables[q+Write_sww.RANGE][0] = min(elevation)
5512        outfile.variables[q+Write_sww.RANGE][1] = max(elevation)
5513
5514
5515    def store_quantities(self, outfile, sww_precision=Float32,
5516                         slice_index=None, time=None,
5517                         verbose=False, **quant):
5518        """
5519        Write the quantity info.
5520
5521        **quant is extra keyword arguments passed in. These must be
5522          the sww quantities, currently; stage, xmomentum, ymomentum.
5523       
5524        if the time array has already been built, use the slice_index
5525        to specify the index.
5526       
5527        Otherwise, use time to increase the time dimension
5528
5529        Maybe make this general, but the viewer assumes these quantities,
5530        so maybe we don't want it general - unless the viewer is general
5531       
5532        precon
5533        triangulation and
5534        header have been called.
5535        """
5536
5537        if time is not None:
5538            file_time = outfile.variables['time']
5539            slice_index = len(file_time)
5540            file_time[slice_index] = time   
5541
5542        # Write the conserved quantities from Domain.
5543        # Typically stage,  xmomentum, ymomentum
5544        # other quantities will be ignored, silently.
5545        # Also write the ranges: stage_range,
5546        # xmomentum_range and ymomentum_range
5547        for q in Write_sww.sww_quantities:
5548            if not quant.has_key(q):
5549                msg = 'SWW file can not write quantity %s' %q
5550                raise NewQuantity, msg
5551            else:
5552                q_values = quant[q]
5553                outfile.variables[q][slice_index] = \
5554                                q_values.astype(sww_precision)
5555
5556                # This updates the _range values
5557                q_range = outfile.variables[q+Write_sww.RANGE][:]
5558                q_values_min = min(q_values)
5559                if q_values_min < q_range[0]:
5560                    outfile.variables[q+Write_sww.RANGE][0] = q_values_min
5561                q_values_max = max(q_values)
5562                if q_values_max > q_range[1]:
5563                    outfile.variables[q+Write_sww.RANGE][1] = q_values_max
5564
5565    def verbose_quantities(self, outfile):
5566        print '------------------------------------------------'
5567        print 'More Statistics:'
5568        for q in Write_sww.sww_quantities:
5569            print '    %s in [%f, %f]' %(q,
5570                                       outfile.variables[q+Write_sww.RANGE][0],
5571                                       outfile.variables[q+Write_sww.RANGE][1])
5572        print '------------------------------------------------'
5573
5574
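# Hedged sketch (added for illustration; not in the original source) of the
# intended calling order for Write_sww: store_header, then
# store_triangulation, then one store_quantities call per time slice.
# All data below is made up; 'example.sww' is a hypothetical output name.
def _example_write_sww():
    from Numeric import array, Float
    points_utm = array([[0.0, 0.0], [10.0, 0.0], [0.0, 10.0]], Float)
    volumes = [[0, 1, 2]]                          # one triangle
    elevation = array([-10.0, -10.0, -10.0], Float)
    times = [0.0, 30.0]

    outfile = NetCDFFile('example.sww', 'w')
    sww = Write_sww()
    sww.store_header(outfile, times, len(volumes), len(points_utm))
    sww.store_triangulation(outfile, points_utm, volumes, elevation, zone=50)
    for j in range(len(times)):
        quiescent = array([0.0, 0.0, 0.0], Float)  # still water, no momentum
        sww.store_quantities(outfile, slice_index=j,
                             stage=quiescent,
                             xmomentum=quiescent,
                             ymomentum=quiescent)
    outfile.close()
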
5575       
5576def obsolete_write_sww_time_slices(outfile, has, uas, vas, elevation,
5577                         mean_stage=0, zscale=1,
5578                         verbose=False):   
5579    #Time stepping
5580    stage = outfile.variables['stage']
5581    xmomentum = outfile.variables['xmomentum']
5582    ymomentum = outfile.variables['ymomentum']
5583
5584    n = len(has)
5585    j=0
5586    for ha, ua, va in map(None, has, uas, vas):
5587        if verbose and j%((n+10)/10)==0: print '  Doing %d of %d' %(j, n)
5588        w = zscale*ha + mean_stage
5589        stage[j] = w
5590        h = w - elevation
5591        xmomentum[j] = ua*h
5592        ymomentum[j] = -1*va*h  # -1 since in mux files south is positive.
5593        j += 1
5594   
5595def urs2txt(basename_in, location_index=None):
5596    """
5597    Not finished or tested
5598    """
5599   
5600    files_in = [basename_in + WAVEHEIGHT_MUX_LABEL,
5601                basename_in + EAST_VELOCITY_LABEL,
5602                basename_in + NORTH_VELOCITY_LABEL]
5603    quantities = ['HA','UA','VA']
5604
5605    d = ","
5606   
5607    # instanciate urs_points of the three mux files.
5608    mux = {}
5609    for quantity, file in map(None, quantities, files_in):
5610        mux[quantity] = Urs_points(file)
5611       
5612    # Could check that the depth is the same. (hashing)
5613
5614    # handle to a mux file to do depth stuff
5615    a_mux = mux[quantities[0]]
5616   
5617    # Convert to utm
5618    latitudes = a_mux.lonlatdep[:,1]
5619    longitudes = a_mux.lonlatdep[:,0]
5620    points_utm, zone = convert_from_latlon_to_utm( \
5621        latitudes=latitudes, longitudes=longitudes)
5622    #print "points_utm", points_utm
5623    #print "zone", zone
5624    depths = a_mux.lonlatdep[:,2]
5625   
5626    fid = open(basename_in + '.txt', 'w')
5627
5628    fid.write("zone: " + str(zone) + "\n")
5629
5630    if location_index is not None:
5631        #Title
5632        li = location_index
5633        fid.write('location_index'+d+'lat'+d+ 'long' +d+ 'Easting' +d+ \
5634                  'Northing' + "\n")
5635        fid.write(str(li) +d+ str(latitudes[li])+d+ \
5636              str(longitudes[li]) +d+ str(points_utm[li][0]) +d+ \
5637              str(points_utm[li][1]) + "\n")
5638
5639    # the non-time dependent stuff
5640    #Title
5641    fid.write('location_index'+d+'lat'+d+ 'long' +d+ 'Easting' +d+ \
5642              'Northing' +d+ 'depth m' + "\n")
5643    i = 0
5644    for depth, point_utm, lat, long in map(None, depths,
5645                                               points_utm, latitudes,
5646                                               longitudes):
5647       
5648        fid.write(str(i) +d+ str(lat)+d+ str(long) +d+ str(point_utm[0]) +d+ \
5649                  str(point_utm[1]) +d+ str(depth) + "\n")
5650        i +=1
5651    #Time dependent
5652    if location_index is not None:
5653        time_step = a_mux.time_step
5654        i = 0
5655        #Title
5656        fid.write('time' +d+ 'HA depth m'+d+ \
5657                 'UA momentum East x m/sec' +d+ 'VA momentum North y m/sec' \
5658                      + "\n")
5659        for HA, UA, VA in map(None, mux['HA'], mux['UA'], mux['VA']):
5660            fid.write(str(i*time_step) +d+ str(HA[location_index])+d+ \
5661                      str(UA[location_index]) +d+ str(VA[location_index]) \
5662                      + "\n")
5663           
5664            i +=1
5665
5666class Write_sts:
5667
5668    sts_quantities = ['stage','xmomentum','ymomentum']
5669
5670
5671    RANGE = '_range'
5672    EXTREMA = ':extrema'
5673   
5674    def __init__(self):
5675        pass
5676
5677    def store_header(self,
5678                     outfile,
5679                     times,
5680                     number_of_points,
5681                     description='Converted from URS mux2 format',
5682                     sts_precision=Float32,
5683                     verbose=False):
5684        """
5685        outfile - the name of the file that will be written
5686        times - A list of the time slice times OR a start time
5687        Note, if a list is given the info will be made relative.
5688        number_of_points - the number of urs gauges sites
5689        """
5690
5691        outfile.institution = 'Geoscience Australia'
5692        outfile.description = description
5693
5694        try:
5695            revision_number = get_revision_number()
5696        except:
5697            revision_number = None
5698        # Allow None to be stored as a string               
5699        outfile.revision_number = str(revision_number) 
5700       
5701        # times - A list or array of the time slice times OR a start time
5702        # Start time in seconds since the epoch (midnight 1/1/1970)
5703
5704        # This is being used to separate one number from a list.
5705        # What it is actually doing is distinguishing lists from numeric arrays.
5706        if type(times) is list or type(times) is ArrayType: 
5707            number_of_times = len(times)
5708            times = ensure_numeric(times) 
5709            if number_of_times == 0:
5710                starttime = 0
5711            else:
5712                starttime = times[0]
5713                times = times - starttime  #Store relative times
5714        else:
5715            number_of_times = 0
5716            starttime = times
5717
5718        outfile.starttime = starttime
5719
5720        # Dimension definitions
5721        outfile.createDimension('number_of_points', number_of_points)
5722        outfile.createDimension('number_of_timesteps', number_of_times)
5723        outfile.createDimension('numbers_in_range', 2)
5724
5725        # Variable definitions
5726        outfile.createVariable('x', sts_precision, ('number_of_points',))
5727        outfile.createVariable('y', sts_precision, ('number_of_points',))
5728        outfile.createVariable('elevation', sts_precision,('number_of_points',))
5729
5730        q = 'elevation'
5731        outfile.createVariable(q+Write_sts.RANGE, sts_precision,
5732                               ('numbers_in_range',))
5733
5734        # Initialise ranges with small and large sentinels.
5735        # If this was in pure Python we could have used None sensibly
5736        outfile.variables[q+Write_sts.RANGE][0] = max_float  # Min
5737        outfile.variables[q+Write_sts.RANGE][1] = -max_float # Max
5738
5739        # Doing sts_precision instead of Float gives cast errors.
5740        outfile.createVariable('time', Float, ('number_of_timesteps',))
5741
5742        for q in Write_sts.sts_quantities:
5743            outfile.createVariable(q, sts_precision,
5744                                   ('number_of_timesteps',
5745                                    'number_of_points'))
5746            outfile.createVariable(q+Write_sts.RANGE, sts_precision,
5747                                   ('numbers_in_range',))
5748            # Initialise ranges with small and large sentinels.
5749            # If this was in pure Python we could have used None sensibly
5750            outfile.variables[q+Write_sts.RANGE][0] = max_float  # Min
5751            outfile.variables[q+Write_sts.RANGE][1] = -max_float # Max
5752
5753        if type(times) is list or type(times) is ArrayType: 
5754            outfile.variables['time'][:] = times    #Store time relative
5755
5756        if verbose:
5757            print '------------------------------------------------'
5758            print 'Statistics:'
5759            print '    t in [%f, %f], len(t) == %d'\
5760                  %(min(times.flat), max(times.flat), len(times.flat))
5761
5762    def store_points(self,
5763                     outfile,
5764                     points_utm,
5765                     elevation, zone=None, new_origin=None, 
5766                     points_georeference=None, verbose=False):
5767
5768        """
5769        points_utm - currently a list or array of the points in UTM.
5770        points_georeference - the georeference of the points_utm
5771
5772        How about passing new_origin and current_origin.
5773        If you get both, do a conversion from the old to the new.
5774       
5775        If you only get new_origin, the points are absolute,
5776        convert to relative
5777       
5778        if you only get the current_origin the points are relative, store
5779        as relative.
5780       
5781        if you get no georefs create a new georef based on the minimums of
5782        points_utm.  (Another option would be to default to absolute)
5783       
5784        Yes, and this is done in another part of the code.
5785        Probably geospatial.
5786       
5787        If you don't supply either geo_refs, then supply a zone. If not
5788        the default zone will be used.
5789
5790        precondition:
5791             header has been called.
5792        """
5793
5794        number_of_points = len(points_utm)
5795        points_utm = array(points_utm)
5796
5797        # given the two geo_refs and the points, do the stuff
5798        # described in the method header
5799        points_georeference = ensure_geo_reference(points_georeference)
5800        new_origin = ensure_geo_reference(new_origin)
5801       
5802        if new_origin is None and points_georeference is not None:
5803            points = points_utm
5804            geo_ref = points_georeference
5805        else:
5806            if new_origin is None:
5807                new_origin = Geo_reference(zone,min(points_utm[:,0]),
5808                                           min(points_utm[:,1]))
5809            points = new_origin.change_points_geo_ref(points_utm,
5810                                                      points_georeference)
5811            geo_ref = new_origin
5812
5813        # At this stage I need a georef and points
5814        # the points are relative to the georef
5815        geo_ref.write_NetCDF(outfile)
5816
5817        x =  points[:,0]
5818        y =  points[:,1]
5819        z = outfile.variables['z'][:]
5820   
5821        if verbose:
5822            print '------------------------------------------------'
5823            print 'More Statistics:'
5824            print '  Extent (lat/lon):'
5825            print '    x in [%f, %f], len(lat) == %d'\
5826                  %(min(x), max(x),
5827                    len(x))
5828            print '    y in [%f, %f], len(lon) == %d'\
5829                  %(min(y), max(y),
5830                    len(y))
5831            print '    z in [%f, %f], len(z) == %d'\
5832                  %(min(elevation), max(elevation),
5833                    len(elevation))
5834            print 'geo_ref: ',geo_ref
5835            print '------------------------------------------------'
5836           
5837        #z = resize(bath_grid,outfile.variables['z'][:].shape)
5838        #print "points[:,0]", points[:,0]
5839        outfile.variables['x'][:] = points[:,0] #- geo_ref.get_xllcorner()
5840        outfile.variables['y'][:] = points[:,1] #- geo_ref.get_yllcorner()
5841        outfile.variables['z'][:] = elevation
5842        outfile.variables['elevation'][:] = elevation  #FIXME HACK4
5843
5844        q = 'elevation'
5845        # This updates the _range values
5846        outfile.variables[q+Write_sts.RANGE][0] = min(elevation)
5847        outfile.variables[q+Write_sts.RANGE][1] = max(elevation)
5848
5849    def store_quantities(self, outfile, sts_precision=Float32,
5850                         slice_index=None, time=None,
5851                         verbose=False, **quant):
5852       
5853        """
5854        Write the quantity info.
5855
5856        **quant is extra keyword arguments passed in. These must be
5857          the sts quantities, currently; stage.
5858       
5859        if the time array has already been built, use the slice_index
5860        to specify the index.
5861       
5862        Otherwise, use time to increase the time dimension
5863
5864        Maybe make this general, but the viewer assumes these quantities,
5865        so maybe we don't want it general - unless the viewer is general
5866       
5867        precondition:
5868            triangulation and
5869            header have been called.
5870        """
5871        if time is not None:
5872            file_time = outfile.variables['time']
5873            slice_index = len(file_time)
5874            file_time[slice_index] = time   
5875
5876        # Write the conserved quantities from Domain.
5877        # Typically stage,  xmomentum, ymomentum
5878        # other quantities will be ignored, silently.
5879        # Also write the ranges: stage_range
5880        for q in Write_sts.sts_quantities:
5881            if not quant.has_key(q):
5882                msg = 'STS file can not write quantity %s' %q
5883                raise NewQuantity, msg
5884            else:
5885                q_values = quant[q]
5886                outfile.variables[q][slice_index] = \
5887                                q_values.astype(sts_precision)
5888
5889                # This updates the _range values
5890                q_range = outfile.variables[q+Write_sts.RANGE][:]
5891                q_values_min = min(q_values)
5892                if q_values_min < q_range[0]:
5893                    outfile.variables[q+Write_sts.RANGE][0] = q_values_min
5894                q_values_max = max(q_values)
5895                if q_values_max > q_range[1]:
5896                    outfile.variables[q+Write_sts.RANGE][1] = q_values_max
5897
5898
5899   
5900class Urs_points:
5901    """
5902    Read the info in URS mux files.
5903
5904    For the quantities, here is the correlation between the file names and
5905    what they mean:
5906    z-mux is height above sea level, m
5907    e-mux is velocity in the easterly direction, m/s
5908    n-mux is velocity in the northerly direction, m/s
5909    """
5910    def __init__(self,urs_file):
5911        self.iterated = False
5912        columns = 3 # long, lat , depth
5913        mux_file = open(urs_file, 'rb')
5914       
5915        # Number of points/stations
5916        (self.points_num,)= unpack('i',mux_file.read(4))
5917       
5918        # nt, int - Number of time steps
5919        (self.time_step_count,)= unpack('i',mux_file.read(4))
5920        #print "self.time_step_count", self.time_step_count
5921        #dt, float - time step, seconds
5922        (self.time_step,) = unpack('f', mux_file.read(4))
5923        #print "self.time_step", self.time_step
5924        msg = "Bad data in the urs file."
5925        if self.points_num < 0:
5926            mux_file.close()
5927            raise ANUGAError, msg
5928        if self.time_step_count < 0:
5929            mux_file.close()
5930            raise ANUGAError, msg
5931        if self.time_step < 0:
5932            mux_file.close()
5933            raise ANUGAError, msg
5934
5935        # the depth is in meters, and it is the distance from the ocean
5936        # to the sea bottom.
5937        lonlatdep = p_array.array('f')
5938        lonlatdep.read(mux_file, columns * self.points_num)
5939        lonlatdep = array(lonlatdep, typecode=Float)   
5940        lonlatdep = reshape(lonlatdep, (self.points_num, columns))
5941        #print 'lonlatdep',lonlatdep
5942        self.lonlatdep = lonlatdep
5943       
5944        self.mux_file = mux_file
5945        # check this array
5946
5947    def __iter__(self):
5948        """
5949        iterate over quantity data which is with respect to time.
5950
5951        Note: You can only interate once over an object
5952       
5953        returns quantity infomation for each time slice
5954        """
5955        msg =  "You can only interate once over a urs file."
5956        assert not self.iterated, msg
5957        self.iter_time_step = 0
5958        self.iterated = True
5959        return self
5960   
5961    def next(self):
5962        if self.time_step_count == self.iter_time_step:
5963            self.close()
5964            raise StopIteration
5965        #Read in a time slice  from mux file 
5966        hz_p_array = p_array.array('f')
5967        hz_p_array.read(self.mux_file, self.points_num)
5968        hz_p = array(hz_p_array, typecode=Float)
5969        self.iter_time_step += 1
5970       
5971        return hz_p
5972
5973    def close(self):
5974        self.mux_file.close()
5975       
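
# Hedged sketch (added for illustration; the filename is hypothetical):
# iterate once over the time slices of a single mux file.  Each slice is
# a Numeric array with one value per point/station.
def _example_urs_points():
    urs = Urs_points('sourceA-z-mux')
    print 'points:', urs.points_num, 'timesteps:', urs.time_step_count
    for hz in urs:          # iteration closes the file when exhausted
        pass                # hz has shape (urs.points_num,)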
5976    #### END URS UNGRIDDED 2 SWW ###
5977
5978       
5979def start_screen_catcher(dir_name=None, myid='', numprocs='', extra_info='',
5980                         verbose=True):
5981    """
5982    Used to store screen output and errors to file. If run on multiple
5983    processes, each processor will have its own output and error file.
5984   
5985    extra_info - a string that can identify outputs with another
5986    string, e.g. '_other'
5987   
5988    FIXME: Would be good if you could suppress all the screen output and
5989    only save it to file... however it seems a bit tricky as this capture
5990    technique responds to sys.stdout, and by then it is already printed out.
5991    """
5992   
5993    import sys
5995    if dir_name is None:
5996        dir_name=getcwd()
5997       
5998    if access(dir_name,W_OK) == 0:
5999        if verbose: print 'Making directory %s' %dir_name
6000      #  if verbose: print "myid", myid
6001        mkdir (dir_name,0777)
6002
6003    if myid != '':
6004        myid = '_'+str(myid)
6005    if numprocs != '':
6006        numprocs = '_'+str(numprocs)
6007    if extra_info != '':
6008        extra_info = '_'+str(extra_info)
6010    screen_output_name = join(dir_name, "screen_output%s%s%s.txt" %(myid,
6011                                                                numprocs,
6012                                                                extra_info))
6013    screen_error_name = join(dir_name,  "screen_error%s%s%s.txt" %(myid,
6014                                                              numprocs,
6015                                                              extra_info))
6016
6017    if verbose: print 'Starting ScreenCatcher, all output will be stored in %s' \
6018                                     %(screen_output_name)
6019    #used to catch screen output to file
6020    sys.stdout = Screen_Catcher(screen_output_name)
6021    sys.stderr = Screen_Catcher(screen_error_name)
6022
6023class Screen_Catcher:
6024    """this simply catches the screen output and stores it to file defined by
6025    start_screen_catcher (above)
6026    """
6027   
6028    def __init__(self, filename):
6029        self.filename = filename
6031        if exists(self.filename):
6032            print 'Old existing file "%s" has been deleted' %(self.filename)
6033            remove(self.filename)
6034
6035    def write(self, stuff):
6036        fid = open(self.filename, 'a')
6037        fid.write(stuff)
6038        fid.close()
6039       
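# Hedged usage sketch (added for illustration; the directory and process
# numbers are hypothetical): redirect all subsequent stdout/stderr to
# per-process files.
def _example_start_screen_catcher():
    start_screen_catcher('screen_logs', myid=0, numprocs=4)
    print 'This line goes to screen_logs/screen_output_0_4.txt'
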
6040# FIXME (DSG): Add unit test, make general, not just 2 files,
6041# but any number of files.
6042def copy_code_files(dir_name, filename1, filename2=None):
6043    """Copies "filename1" and "filename2" to "dir_name". Very useful for
6044    information management
6045    filename1 and filename2 are both absolute pathnames   
6046    """
6047
6048    if access(dir_name,F_OK) == 0:
6049        print 'Make directory %s' %dir_name
6050        mkdir (dir_name,0777)
6051    shutil.copy(filename1, dir_name + sep + basename(filename1))
6052    if filename2 is not None:
6053        shutil.copy(filename2, dir_name + sep + basename(filename2))
6054        print 'Files %s and %s copied' %(filename1, filename2)
6055    else:
6056        print 'File %s copied' %(filename1)
6057
6058def get_data_from_file(filename,separator_value = ','):
6059    """
6060    Read in data information from a delimited text file.
6061   
6062    Returns:
6063        header_fields, a list of the fields in the first line, separated
6064        by the 'separator_value'
6065       
6066        data, an array (M lines x N data columns) of the file contents
6067        excluding the header
6068       
6069    NOTE: won't deal with columns of different lengths and there must be
6070    no blank lines at the end.
6071    """
6072   
6073    fid = open(filename)
6074    lines = fid.readlines()
6075   
6076    fid.close()
6077   
6078    header_line = lines[0]
6079    header_fields = header_line.split(separator_value)
6080
6081    #array to store data, number in there is to allow float...
6082    #i'm sure there is a better way!
6083    data=array([],typecode=Float)
6084    data=resize(data,((len(lines)-1),len(header_fields)))
6085#    print 'number of fields',range(len(header_fields))
6086#    print 'number of lines',len(lines), shape(data)
6087#    print'data',data[1,1],header_line
6088
6089    array_number = 0
6090    line_number = 1
6091    while line_number < (len(lines)):
6092        for i in range(len(header_fields)): 
6093            #this gets the lines below the header (line_number starts at 1)
6094            #while array_number is used as the array index
6095            fields = lines[line_number].split(separator_value)
6096            #assign to array
6097            data[array_number,i] = float(fields[i])
6098           
6099        line_number = line_number +1
6100        array_number = array_number +1
6101       
6102    return header_fields, data
6103
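# Hedged sketch (added for illustration) with a hypothetical csv file
# 'gauge.csv' of the expected shape:
#     time,stage
#     0.0,1.5
#     10.0,1.7
def _example_get_data_from_file():
    header_fields, data = get_data_from_file('gauge.csv')
    # header_fields == ['time', 'stage\n']; data[:,0] is the time column
    return header_fields, data
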
6104def store_parameters(verbose=False,**kwargs):
6105    """
6106    Store "kwargs" into a temp csv file, if "completed" is a kwargs csv file is
6107    kwargs[file_name] else it is kwargs[output_dir] + details_temp.csv
6108   
6109    Must have a file_name keyword arg, this is what is writing to.
6110    might be a better way to do this using CSV module Writer and writeDict
6111   
6112    writes file to "output_dir" unless "completed" is in kwargs, then
6113    it writes to "file_name" kwargs
6114
6115    """
6116    import types
6117#    import os
6118   
6119    # Check that kwargs is a dictionary
6120    if type(kwargs) != types.DictType:
6121        raise TypeError
6122   
6123    #is completed is kwargs?
6124    try:
6125        kwargs['completed']
6126        completed=True
6127    except:
6128        completed=False
6129 
6130    #get file name and removes from dict and assert that a file_name exists
6131    if completed:
6132        try:
6133            file = str(kwargs['file_name'])
6134        except:
6135            raise 'kwargs must have file_name'
6136    else:
6137        #write temp file in output directory
6138        try:
6139            file = str(kwargs['output_dir'])+'detail_temp.csv'
6140        except:
6141            raise 'kwargs must have output_dir'
6142       
6143    #extracts the header info and the new line info
6144    line=''
6145    header=''
6146    count=0
6147    keys = kwargs.keys()
6148    keys.sort()
6149   
6150    #used the sorted keys to create the header and line data
6151    for k in keys:
6152#        print "%s = %s" %(k, kwargs[k])
6153        header = header+str(k)
6154        line = line+str(kwargs[k])
6155        count+=1
6156        if count <len(kwargs):
6157            header = header+','
6158            line = line+','
6159    header+='\n'
6160    line+='\n'
6161
6162    # checks the header info, if the same, then write, if not create a new file
6163    #try to open!
6164    try:
6165        fid = open(file,"r")
6166        file_header=fid.readline()
6167        fid.close()
6168        if verbose: print 'read file header %s' %file_header
6169       
6170    except:
6171        msg = 'try to create new file',file
6172        if verbose: print msg
6173        #tries to open file, maybe directory is bad
6174        try:
6175            fid = open(file,"w")
6176            fid.write(header)
6177            fid.close()
6178            file_header=header
6179        except:
6180            msg = 'cannot create new file',file
6181            raise msg
6182           
6183    #if header is same or this is a new file
6184    if file_header==str(header):
6185        fid=open(file,"a")
6186        #write new line
6187        fid.write(line)
6188        fid.close()
6189    else:
6190        #backup plan,
6191        # if header is different and has completed will append info to
6192        #end of details_temp.cvs file in output directory
6193        file = str(kwargs['output_dir'])+'detail_temp.csv'
6194        fid=open(file,"a")
6195        fid.write(header)
6196        fid.write(line)
6197        fid.close()
6198        if verbose: print 'file',file_header.strip('\n')
6199        if verbose: print 'head',header.strip('\n')
6200        if file_header.strip('\n')==str(header): print 'they equal'
6201        msg = 'WARNING: File header does not match input info, the input variables have changed, suggest to change file name'
6202        print msg
6203
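# Illustrative sketch (not called anywhere): how store_parameters might be
# used around a model run. The keyword names below are hypothetical.
def _example_store_parameters():
    # Accumulate details in a temp file while the run is in progress
    store_parameters(output_dir='output/', run_name='test', cell_size=10)
    # On completion, write everything to the nominated file
    store_parameters(file_name='output/details.csv',
                     output_dir='output/',
                     run_name='test', cell_size=10, completed=True)
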


#------------------------------------------------
# Functions to obtain diagnostics from sww files
#------------------------------------------------

def get_mesh_and_quantities_from_file(filename,
                                      quantities=None,
                                      verbose=False):
    """Get and rebuild mesh structure and associated quantities from sww file.

    Input:
        filename - Name of sww file
        quantities - Names of quantities to load

    Output:
        mesh - instance of class Interpolate
               (including mesh and interpolation functionality)
        quantities - arrays with quantity values at each mesh node
        time - vector of stored timesteps
    """

    # FIXME (Ole): Maybe refactor filefunction using this more fundamental code.

    from Scientific.IO.NetCDF import NetCDFFile
    from anuga.abstract_2d_finite_volumes.neighbour_mesh import Mesh

    if verbose: print 'Reading from ', filename
    fid = NetCDFFile(filename, 'r')    # Open existing file for read
    time = fid.variables['time'][:]    # Time vector
    time += fid.starttime[0]

    # Get the variables as Numeric arrays
    x = fid.variables['x'][:]                   # x-coordinates of nodes
    y = fid.variables['y'][:]                   # y-coordinates of nodes
    elevation = fid.variables['elevation'][:]   # Elevation
    stage = fid.variables['stage'][:]           # Water level
    xmomentum = fid.variables['xmomentum'][:]   # Momentum in the x-direction
    ymomentum = fid.variables['ymomentum'][:]   # Momentum in the y-direction

    # Mesh (nodes (Mx2), triangles (Nx3))
    nodes = concatenate((x[:, NewAxis], y[:, NewAxis]), axis=1)
    triangles = fid.variables['volumes'][:]

    # Get geo_reference
    try:
        geo_reference = Geo_reference(NetCDFObject=fid)
    except AttributeError:
        # Sww files don't have to have a geo_ref
        geo_reference = None

    if verbose: print '    building mesh from sww file %s' %filename
    boundary = None

    # FIXME (Peter Row): Should this be in mesh?
    if fid.smoothing != 'Yes':
        nodes = nodes.tolist()
        triangles = triangles.tolist()
        nodes, triangles, boundary = weed(nodes, triangles, boundary)

    try:
        mesh = Mesh(nodes, triangles, boundary,
                    geo_reference=geo_reference)
    except AssertionError, e:
        fid.close()
        msg = 'Domain could not be created: %s' %e
        raise DataDomainError, msg

    # FIXME: The quantities argument is currently ignored - all four
    # standard quantities are always returned.
    quantities = {}
    quantities['elevation'] = elevation
    quantities['stage'] = stage
    quantities['xmomentum'] = xmomentum
    quantities['ymomentum'] = ymomentum

    fid.close()
    return mesh, quantities, time


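# Illustrative sketch (not called anywhere): reading the mesh and
# quantities back from a hypothetical output file 'runup.sww'.
def _example_get_mesh_and_quantities():
    mesh, quantities, time = \
          get_mesh_and_quantities_from_file('runup.sww')
    print 'Stored timesteps:', time
    print 'Stage at first node, first timestep:', quantities['stage'][0, 0]
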
def get_flow_through_cross_section(filename,
                                   polyline,
                                   verbose=False):
    """Obtain flow (m^3/s) perpendicular to specified cross section.

    Inputs:
        filename: Name of sww file
        polyline: Representation of desired cross section - it may contain
                  multiple sections allowing for complex shapes. Assumed to
                  be in absolute UTM coordinates.
                  Format [[x0, y0], [x1, y1], ...]

    Output:
        time: All stored times in sww file
        Q: Hydrograph of total flow across given segments for all stored
           times.

    The normal flow is computed for each triangle intersected by the polyline
    and added up. If multiple segments at different angles are specified, the
    normal flows may partially cancel each other.

    The typical usage of this function would be to get flow through a channel,
    and the polyline would then be a cross section perpendicular to the flow.
    """

    from anuga.fit_interpolate.interpolate import Interpolation_function

    quantity_names = ['elevation',
                      'stage',
                      'xmomentum',
                      'ymomentum']

    # Get mesh and quantities from sww file
    X = get_mesh_and_quantities_from_file(filename,
                                          quantities=quantity_names,
                                          verbose=verbose)
    mesh, quantities, time = X

    # Adjust polyline to mesh spatial origin
    polyline = mesh.geo_reference.get_relative(polyline)

    # Find all intersections and associated triangles.
    segments = mesh.get_intersecting_segments(polyline, verbose=verbose)

    # Then store for each triangle the length of the intersecting segment(s),
    # right hand normal(s) and midpoints as interpolation_points.
    interpolation_points = []
    for segment in segments:
        midpoint = sum(array(segment.segment))/2
        interpolation_points.append(midpoint)

    if verbose:
        print 'Interpolating - ',
        print 'total number of interpolation points = %d'\
              %len(interpolation_points)

    I = Interpolation_function(time,
                               quantities,
                               quantity_names=quantity_names,
                               vertex_coordinates=mesh.nodes,
                               triangles=mesh.triangles,
                               interpolation_points=interpolation_points,
                               verbose=verbose)

    if verbose: print 'Computing hydrograph'

    # Compute hydrograph
    Q = []
    for t in time:
        total_flow = 0
        for i, p in enumerate(interpolation_points):
            elevation, stage, uh, vh = I(t, point_id=i)
            normal = segments[i].normal

            # Inner product of momentum vector with segment normal [m^2/s]
            normal_momentum = uh*normal[0] + vh*normal[1]

            # Flow across this segment [m^3/s]
            segment_flow = normal_momentum*segments[i].length

            # Accumulate
            total_flow += segment_flow

        # Store flow at this timestep
        Q.append(total_flow)

    return time, Q


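# Illustrative sketch (not called anywhere): computing a hydrograph through
# a hypothetical channel cross section in 'channel.sww'. The UTM
# coordinates below are made up.
def _example_get_flow_through_cross_section():
    polyline = [[308000.0, 6189000.0], [308100.0, 6189000.0]]
    time, Q = get_flow_through_cross_section('channel.sww', polyline)
    for t, q in zip(time, Q):
        print 'Time %.2f s: flow %.3f m^3/s' %(t, q)
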
def get_maximum_inundation_elevation(filename,
                                     polygon=None,
                                     time_interval=None,
                                     verbose=False):
    """Return highest elevation where depth > 0

    Usage:
    max_runup = get_maximum_inundation_elevation(filename,
                                                 polygon=None,
                                                 time_interval=None,
                                                 verbose=False)

    filename is a NetCDF sww file containing ANUGA model output.
    Optional arguments polygon and time_interval restrict the maximum runup
    calculation to points that lie within the specified polygon and
    time interval.

    If no inundation is found within polygon and time_interval the return
    value is None signifying "No Runup" or "Everything is dry".

    See general function get_maximum_inundation_data for details.
    """

    runup, _ = get_maximum_inundation_data(filename,
                                           polygon=polygon,
                                           time_interval=time_interval,
                                           verbose=verbose)
    return runup


def get_maximum_inundation_location(filename,
                                    polygon=None,
                                    time_interval=None,
                                    verbose=False):
    """Return location of highest elevation where depth > 0

    Usage:
    max_runup_location = get_maximum_inundation_location(filename,
                                                         polygon=None,
                                                         time_interval=None,
                                                         verbose=False)

    filename is a NetCDF sww file containing ANUGA model output.
    Optional arguments polygon and time_interval restrict the maximum runup
    calculation to points that lie within the specified polygon and
    time interval.

    If no inundation is found within polygon and time_interval the return
    value is None signifying "No Runup" or "Everything is dry".

    See general function get_maximum_inundation_data for details.
    """

    _, max_loc = get_maximum_inundation_data(filename,
                                             polygon=polygon,
                                             time_interval=time_interval,
                                             verbose=verbose)
    return max_loc


def get_maximum_inundation_data(filename, polygon=None, time_interval=None,
                                use_centroid_values=False,
                                verbose=False):
    """Compute maximum run up height from sww file.

    Usage:
    runup, location = get_maximum_inundation_data(filename,
                                                  polygon=None,
                                                  time_interval=None,
                                                  verbose=False)

    Algorithm is as in get_maximum_inundation_elevation from
    shallow_water_domain except that this function works with the sww file
    and computes the maximal runup height over multiple timesteps.

    Optional arguments polygon and time_interval restrict the maximum runup
    calculation to points that lie within the specified polygon and
    time interval. The polygon is assumed to be in (absolute) UTM
    coordinates in the same zone as the domain.

    If no inundation is found within polygon and time_interval the return
    value is None signifying "No Runup" or "Everything is dry".
    """

    # We are using nodal values here as that is what is stored in sww files.

    # Water depth below which it is considered to be 0 in the model
    # FIXME (Ole): Allow this to be specified as a keyword argument as well

    from anuga.utilities.polygon import inside_polygon
    from anuga.config import minimum_allowed_height
    from Scientific.IO.NetCDF import NetCDFFile

    dir, base = os.path.split(filename)

    iterate_over = get_all_swwfiles(dir, base)

    maximal_runup = None
    maximal_runup_location = None

    for swwfile in iterate_over:
        # Read sww file
        filename = join(dir, swwfile+'.sww')

        if verbose:
            print 'Reading from %s' %filename
            # FIXME: Use general swwstats (when done)

        fid = NetCDFFile(filename)

        # Get geo_reference; sww files don't have to have a geo_ref
        try:
            geo_reference = Geo_reference(NetCDFObject=fid)
        except AttributeError, e:
            geo_reference = Geo_reference() # Default georef object

        xllcorner = geo_reference.get_xllcorner()
        yllcorner = geo_reference.get_yllcorner()
        zone = geo_reference.get_zone()

        # Get extent
        volumes = fid.variables['volumes'][:]
        x = fid.variables['x'][:] + xllcorner
        y = fid.variables['y'][:] + yllcorner

        # Get the relevant quantities (converting from single precision)
        elevation = array(fid.variables['elevation'][:], Float)
        stage = array(fid.variables['stage'][:], Float)

        # Here's where one could convert nodal information to centroid
        # information, but that is probably something we need to write in C.
        # Here's a Python thought which is NOT finished!!!
        if use_centroid_values is True:
            x = get_centroid_values(x, volumes)
            y = get_centroid_values(y, volumes)
            elevation = get_centroid_values(elevation, volumes)

        # Spatial restriction
        if polygon is not None:
            msg = 'polygon must be a sequence of points.'
            assert len(polygon[0]) == 2, msg
            # FIXME (Ole): Make a generic polygon input check in polygon.py
            # and call it here

            points = concatenate((x[:, NewAxis], y[:, NewAxis]), axis=1)

            point_indices = inside_polygon(points, polygon)

            # Restrict quantities to polygon
            elevation = take(elevation, point_indices)
            stage = take(stage, point_indices, axis=1)

            # Get info for location of maximal runup
            points_in_polygon = take(points, point_indices)
            x = points_in_polygon[:, 0]
            y = points_in_polygon[:, 1]
        else:
            # Take all points
            point_indices = arange(len(x))

        # Temporal restriction
        time = fid.variables['time'][:]
        all_timeindices = arange(len(time))
        if time_interval is not None:
            msg = 'time_interval must be a sequence of length 2.'
            assert len(time_interval) == 2, msg
            msg = 'time_interval %s must not be decreasing.' %(time_interval)
            assert time_interval[1] >= time_interval[0], msg

            msg = 'Specified time interval [%.8f:%.8f]' %tuple(time_interval)
            msg += ' does not match model time interval: [%.8f, %.8f]\n'\
                   %(time[0], time[-1])
            if time_interval[1] < time[0]: raise ValueError(msg)
            if time_interval[0] > time[-1]: raise ValueError(msg)

            # Take time indices corresponding to interval (& is bitwise AND)
            timesteps = compress((time_interval[0] <= time) &\
                                 (time <= time_interval[1]),
                                 all_timeindices)

            msg = 'time_interval %s did not include any model timesteps.'\
                  %(time_interval)
            assert len(timesteps) > 0, msg
        else:
            # Take them all
            timesteps = all_timeindices


        fid.close()

        # Compute maximal runup for each timestep
        for i in timesteps:
            if use_centroid_values is True:
                stage_i = get_centroid_values(stage[i,:], volumes)
            else:
                stage_i = stage[i,:]

            depth = stage_i - elevation

            # Get wet nodes, i.e. nodes with depth > 0, within the given
            # region and timesteps
            wet_nodes = compress(depth > minimum_allowed_height,
                                 arange(len(depth)))

            if len(wet_nodes) == 0:
                runup = None
            else:
                # Find maximum elevation among wet nodes
                wet_elevation = take(elevation, wet_nodes)

                runup_index = argmax(wet_elevation)
                runup = max(wet_elevation)
                assert wet_elevation[runup_index] == runup # Must be True

            if runup > maximal_runup:
                # This comparison works even while maximal_runup is None
                maximal_runup = runup

                # Record location
                wet_x = take(x, wet_nodes)
                wet_y = take(y, wet_nodes)
                maximal_runup_location = [wet_x[runup_index],
                                          wet_y[runup_index]]

    return maximal_runup, maximal_runup_location

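# Illustrative sketch (not called anywhere): querying maximal runup and
# its location from a hypothetical output file 'runup.sww', restricted to
# a made-up polygon and the first 100 s of the simulation.
def _example_get_maximum_inundation_data():
    polygon = [[0.0, 0.0], [100.0, 0.0], [100.0, 100.0], [0.0, 100.0]]
    runup, location = get_maximum_inundation_data('runup.sww',
                                                  polygon=polygon,
                                                  time_interval=[0.0, 100.0])
    if runup is None:
        print 'Everything is dry'
    else:
        print 'Maximal runup %.3f m at %s' %(runup, location)
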
def get_all_swwfiles(look_in_dir='', base_name='', verbose=False):
    '''
    Finds all the sww files in "look_in_dir" whose names contain
    "base_name". Will accept base_name with or without the extension ".sww".

    Returns: a list of strings

    Usage:     iterate_over = get_all_swwfiles(dir, name)
    then
               for swwfile in iterate_over:
                   do stuff

    Check "export_grids" and "get_maximum_inundation_data" for examples
    '''

    # Check the extension
    name, extension = os.path.splitext(base_name)

    if extension != '' and extension != '.sww':
        msg = 'file %s %s must be a NetCDF sww file!' %(base_name, extension)
        raise IOError, msg

    if look_in_dir == "":
        look_in_dir = "." # Unix compatibility

    dir_ls = os.listdir(look_in_dir)
    iterate_over = [x[:-4] for x in dir_ls if name in x and x[-4:] == '.sww']
    if len(iterate_over) == 0:
        msg = 'No files of the base name %s' %(name)
        raise IOError, msg
    if verbose: print 'iterate over %s' %(iterate_over)

    return iterate_over

def get_all_files_with_extension(look_in_dir='', base_name='',
                                 extension='.sww', verbose=False):
    '''
    Finds all the files with the given "extension" in "look_in_dir" whose
    names contain "base_name".

    Returns: a list of strings

    Usage:     iterate_over = get_all_files_with_extension(dir, name, '.sww')
    then
               for swwfile in iterate_over:
                   do stuff

    Check "export_grids" and "get_maximum_inundation_data" for examples
    '''

    # Check the extension
    name, ext = os.path.splitext(base_name)

    if ext != '' and ext != extension:
        msg = 'base_name %s must be a file with %s extension!'\
              %(base_name, extension)
        raise IOError, msg

    if look_in_dir == "":
        look_in_dir = "." # Unix compatibility

    dir_ls = os.listdir(look_in_dir)
    n = len(extension)
    iterate_over = [x[:-n] for x in dir_ls
                    if name in x and x[-n:] == extension]
    if len(iterate_over) == 0:
        msg = 'No files of the base name %s in %s'\
              %(name, look_in_dir)
        raise IOError, msg
    if verbose: print 'iterate over %s' %(iterate_over)

    return iterate_over

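# Illustrative sketch (not called anywhere): iterating over all tsh files
# in a hypothetical directory 'meshes' whose names contain 'test'.
def _example_get_all_files_with_extension():
    for name in get_all_files_with_extension('meshes', 'test', '.tsh'):
        print 'Found mesh file %s.tsh' %name
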
def get_all_directories_with_name(look_in_dir='', base_name='',
                                  verbose=False):
    '''
    Finds all the directories in "look_in_dir" whose names contain
    "base_name".

    Returns: a list of strings

    Usage:     iterate_over = get_all_directories_with_name(dir, name)
    then
               for dirname in iterate_over:
                   do stuff

    Check "export_grids" and "get_maximum_inundation_data" for examples
    '''

    if look_in_dir == "":
        look_in_dir = "." # Unix compatibility

    dir_ls = os.listdir(look_in_dir)
    iterate_over = [x for x in dir_ls if base_name in x]
    if len(iterate_over) == 0:
        msg = 'No files of the base name %s' %(base_name)
        raise IOError, msg
    if verbose: print 'iterate over %s' %(iterate_over)

    return iterate_over

def points2polygon(points_file,
                   minimum_triangle_angle=3.0):
    """
    WARNING: This function is not fully working.

    Function to return a polygon obtained from an alpha shape, given a
    points file.

    WARNING: Alpha shape returns multiple polygons, but this function
    only returns one polygon.
    """
    from anuga.pmesh.mesh import importMeshFromFile
    from anuga.shallow_water import Domain

    mesh = importMeshFromFile(points_file)
    mesh.auto_segment()
    mesh.exportASCIIsegmentoutlinefile("outline.tsh")
    mesh2 = importMeshFromFile("outline.tsh")
    mesh2.generate_mesh(maximum_triangle_area=1000000000,
                        minimum_triangle_angle=minimum_triangle_angle,
                        verbose=False)
    mesh2.export_mesh_file('outline_meshed.tsh')
    domain = Domain("outline_meshed.tsh", use_cache=False)
    polygon = domain.get_boundary_polygon()
    return polygon

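# Illustrative sketch (not called anywhere): extracting a boundary polygon
# from a hypothetical points file 'coastline.pts'. Note the warnings on
# points2polygon above - it is not fully working.
def _example_points2polygon():
    polygon = points2polygon('coastline.pts')
    print 'Boundary polygon has %d vertices' %len(polygon)
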
#-------------------------------------------------------------
if __name__ == "__main__":
    # Set umask from config to force the same permissions for all files
    # and directories created. (It was noticed that "mpirun" doesn't
    # honour the umask set in your .bashrc etc.)
    from config import umask
    import os
    os.umask(umask)