source: anuga_core/source/anuga/shallow_water/data_manager.py @ 5589

Last change on this file was 5589, checked in by sexton, 16 years ago:
unit tests for comparing urs gauges to sts outputs

1"""datamanager.py - input output for AnuGA
2
3
4This module takes care of reading and writing datafiles such as topograhies,
5model output, etc
6
7
8Formats used within AnuGA:
9
10.sww: Netcdf format for storing model output f(t,x,y)
11.tms: Netcdf format for storing time series f(t)
12
13.csv: ASCII format for storing arbitrary points and associated attributes
14.pts: NetCDF format for storing arbitrary points and associated attributes
15
16.asc: ASCII format of regular DEMs as output from ArcView
17.prj: Associated ArcView file giving more meta data for asc format
18.ers: ERMapper header format of regular DEMs for ArcView
19
20.dem: NetCDF representation of regular DEM data
21
22.tsh: ASCII format for storing meshes and associated boundary and region info
23.msh: NetCDF format for storing meshes and associated boundary and region info
24
25.nc: Native ferret NetCDF format
26.geo: Houdinis ascii geometry format (?)
27
28
29A typical dataflow can be described as follows
30
31Manually created files:
32ASC, PRJ:     Digital elevation models (gridded)
33TSH:          Triangular meshes (e.g. created from anuga.pmesh)
34NC            Model outputs for use as boundary conditions (e.g from MOST)
35
36
37AUTOMATICALLY CREATED FILES:
38
39ASC, PRJ  ->  DEM  ->  PTS: Conversion of DEM's to native pts file
40
41NC -> SWW: Conversion of MOST bundary files to boundary sww
42
43PTS + TSH -> TSH with elevation: Least squares fit
44
45TSH -> SWW:  Conversion of TSH to sww viewable using Swollen
46
47TSH + Boundary SWW -> SWW: Simluation using abstract_2d_finite_volumes
48
49"""

# This file was reverted from changeset:5484 to changeset:5470 on 10th July
# by Ole.

import exceptions
class TitleValueError(exceptions.Exception): pass
class DataMissingValuesError(exceptions.Exception): pass
class DataFileNotOpenError(exceptions.Exception): pass
class DataTimeError(exceptions.Exception): pass
class DataDomainError(exceptions.Exception): pass
class NewQuantity(exceptions.Exception): pass



import csv
import os, sys
import shutil
from struct import unpack
import array as p_array
#import time, os
from os import sep, path, remove, mkdir, access, F_OK, W_OK, getcwd


from Numeric import concatenate, array, Float, Int, Int32, resize, \
     sometrue, searchsorted, zeros, allclose, around, reshape, \
     transpose, sort, NewAxis, ArrayType, compress, take, arange, \
     argmax, alltrue, shape, Float32, size

import string

from Scientific.IO.NetCDF import NetCDFFile
#from shutil import copy
from os.path import exists, basename, join
from os import getcwd


from anuga.coordinate_transforms.redfearn import redfearn, \
     convert_from_latlon_to_utm
from anuga.coordinate_transforms.geo_reference import Geo_reference, \
     write_NetCDF_georeference, ensure_geo_reference
from anuga.geospatial_data.geospatial_data import Geospatial_data,\
     ensure_absolute
from anuga.config import minimum_storable_height as default_minimum_storable_height
from anuga.config import max_float
from anuga.utilities.numerical_tools import ensure_numeric, mean
from anuga.caching.caching import myhash
from anuga.utilities.anuga_exceptions import ANUGAError
from anuga.shallow_water import Domain
from anuga.abstract_2d_finite_volumes.pmesh2domain import \
     pmesh_to_domain_instance
from anuga.abstract_2d_finite_volumes.util import get_revision_number, \
     remove_lone_verts, sww2timeseries, get_centroid_values
from anuga.load_mesh.loadASCII import export_mesh_file
from anuga.utilities.polygon import intersection


# formula mappings

quantity_formula = {'momentum':'(xmomentum**2 + ymomentum**2)**0.5',
                    'depth':'stage-elevation',
                    'speed': \
 '(xmomentum**2 + ymomentum**2)**0.5/(stage-elevation+1.e-6/(stage-elevation))'}

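# A minimal sketch (not part of the original module) of how these formula
# strings are typically consumed: evaluated with the quantity arrays bound
# to the names appearing in the formula.  The arrays below are hypothetical
# placeholders.
#
#     from Numeric import array
#     stage = array([1.0, 2.0])
#     elevation = array([0.5, 0.5])
#     depth = eval(quantity_formula['depth'])   # -> array([0.5, 1.5])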


def make_filename(s):
    """Transform argument string into a suitable filename
    """

    s = s.strip()
    s = s.replace(' ', '_')
    s = s.replace('(', '')
    s = s.replace(')', '')
    s = s.replace('__', '_')

    return s


def check_dir(path, verbose=None):
    """Check that specified path exists.
    If path does not exist it will be created if possible

    USAGE:
       check_dir(path, verbose)

    ARGUMENTS:
        path -- Directory
        verbose -- Flag verbose output (default: None)

    RETURN VALUE:
        Verified path including trailing separator

    """

    import os.path

    if sys.platform in ['nt', 'dos', 'win32', 'what else?']:
        unix = 0
    else:
        unix = 1


    if path[-1] != os.sep:
        path = path + os.sep  # Add separator for directories

    path = os.path.expanduser(path) # Expand ~ or ~user in pathname
    if not (os.access(path, os.R_OK | os.W_OK) or path == ''):
        try:
            exitcode = os.mkdir(path)

            # Change access rights if possible
            #
            if unix:
                exitcode = os.system('chmod 775 ' + path)
            else:
                pass  # FIXME: What about access rights under Windows?

            if verbose: print 'MESSAGE: Directory', path, 'created.'

        except:
            print 'WARNING: Directory', path, 'could not be created.'
            if unix:
                path = '/tmp/'
            else:
                path = 'C:'

            print 'Using directory %s instead' %path

    return(path)

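# Example usage of check_dir (hypothetical path; the directory is created
# if it does not already exist and the returned path ends with os.sep):
#
#     datadir = check_dir('~/anuga_output', verbose=True)
#     open(datadir + 'run1.log', 'w').write('started\n')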


def del_dir(path):
    """Recursively delete directory path and all its contents
    """

    import os

    if os.path.isdir(path):
        for file in os.listdir(path):
            X = os.path.join(path, file)


            if os.path.isdir(X) and not os.path.islink(X):
                del_dir(X)
            else:
                try:
                    os.remove(X)
                except:
                    print "Could not remove file %s" %X

        os.rmdir(path)


# ANOTHER OPTION, IF NEEDED IN THE FUTURE, Nick B 7/2007
def rmgeneric(path, __func__, verbose=False):
    ERROR_STR = """Error removing %(path)s, %(error)s """

    try:
        __func__(path)
        if verbose: print 'Removed ', path
    except OSError, (errno, strerror):
        print ERROR_STR % {'path' : path, 'error': strerror }

def removeall(path, verbose=False):

    if not os.path.isdir(path):
        return

    files = os.listdir(path)

    for x in files:
        fullpath = os.path.join(path, x)
        if os.path.isfile(fullpath):
            f = os.remove
            rmgeneric(fullpath, f, verbose)
        elif os.path.isdir(fullpath):
            removeall(fullpath, verbose)
            f = os.rmdir
            rmgeneric(fullpath, f, verbose)



def create_filename(datadir, filename, format, size=None, time=None):

    import os
    #from anuga.config import data_dir

    FN = check_dir(datadir) + filename

    if size is not None:
        FN += '_size%d' %size

    if time is not None:
        FN += '_time%.2f' %time

    FN += '.' + format
    return FN

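# Example of the naming scheme produced by create_filename (hypothetical
# arguments):
#
#     create_filename('.', 'myrun', 'sww', size=4, time=1.5)
#     # -> './myrun_size4_time1.50.sww'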

def get_files(datadir, filename, format, size):
    """Get all file (names) with given name, size and format
    """

    import glob

    import os
    #from anuga.config import data_dir

    dir = check_dir(datadir)  # Includes trailing separator

    # Match names as produced by create_filename
    pattern = dir + filename + '_size%d*.%s' %(size, format)
    return glob.glob(pattern)



#Generic class for storing output to e.g. visualisation or checkpointing
class Data_format:
    """Generic interface to data formats
    """


    def __init__(self, domain, extension, mode = 'w'):
        assert mode in ['r', 'w', 'a'], '''Mode %s must be either:''' %mode +\
                                        '''   'w' (write)''' +\
                                        '''   'r' (read)''' +\
                                        '''   'a' (append)'''

        #Create filename
        self.filename = create_filename(domain.get_datadir(),
                                        domain.get_name(), extension)

        #print 'F', self.filename
        self.timestep = 0
        self.domain = domain



        # Exclude ghosts in case this is a parallel domain
        self.number_of_nodes = domain.number_of_full_nodes
        self.number_of_volumes = domain.number_of_full_triangles
        #self.number_of_volumes = len(domain)




        #FIXME: Should we have a general set_precision function?



#Class for storing output to e.g. visualisation
class Data_format_sww(Data_format):
    """Interface to native NetCDF format (.sww) for storing model output

    There are two kinds of data

    1: Constant data: Vertex coordinates and field values. Stored once
    2: Variable data: Conserved quantities. Stored once per timestep.

    All data is assumed to reside at vertex locations.
    """


    def __init__(self, domain, mode = 'w',\
                 max_size = 2000000000,
                 recursion = False):
        from Scientific.IO.NetCDF import NetCDFFile
        from Numeric import Int, Float, Float32

        self.precision = Float32 #Use single precision for quantities
        if hasattr(domain, 'max_size'):
            self.max_size = domain.max_size #file size max is 2Gig
        else:
            self.max_size = max_size
        self.recursion = recursion
        self.mode = mode

        Data_format.__init__(self, domain, 'sww', mode)

        if hasattr(domain, 'minimum_storable_height'):
            self.minimum_storable_height = domain.minimum_storable_height
        else:
            self.minimum_storable_height = default_minimum_storable_height

        # NetCDF file definition
        fid = NetCDFFile(self.filename, mode)

        if mode == 'w':
            description = 'Output from anuga.abstract_2d_finite_volumes suitable for plotting'
            self.writer = Write_sww()
            self.writer.store_header(fid,
                                     domain.starttime,
                                     self.number_of_volumes,
                                     self.domain.number_of_full_nodes,
                                     description=description,
                                     smoothing=domain.smooth,
                                     order=domain.default_order,
                                     sww_precision=self.precision)

            # Extra optional information
            if hasattr(domain, 'texture'):
                fid.texture = domain.texture

            if domain.quantities_to_be_monitored is not None:
                fid.createDimension('singleton', 1)
                fid.createDimension('two', 2)

                poly = domain.monitor_polygon
                if poly is not None:
                    N = len(poly)
                    fid.createDimension('polygon_length', N)
                    fid.createVariable('extrema.polygon',
                                       self.precision,
                                       ('polygon_length',
                                        'two'))
                    fid.variables['extrema.polygon'][:] = poly


                interval = domain.monitor_time_interval
                if interval is not None:
                    fid.createVariable('extrema.time_interval',
                                       self.precision,
                                       ('two',))
                    fid.variables['extrema.time_interval'][:] = interval


                for q in domain.quantities_to_be_monitored:
                    #print 'doing', q
                    fid.createVariable(q+'.extrema', self.precision,
                                       ('numbers_in_range',))
                    fid.createVariable(q+'.min_location', self.precision,
                                       ('numbers_in_range',))
                    fid.createVariable(q+'.max_location', self.precision,
                                       ('numbers_in_range',))
                    fid.createVariable(q+'.min_time', self.precision,
                                       ('singleton',))
                    fid.createVariable(q+'.max_time', self.precision,
                                       ('singleton',))


        fid.close()


    def store_connectivity(self):
        """Specialisation of store_connectivity for NetCDF format

        Writes x,y,z coordinates of triangles constituting
        the bed elevation.
        """

        from Scientific.IO.NetCDF import NetCDFFile

        from Numeric import concatenate, Int

        domain = self.domain

        #Get NetCDF
        fid = NetCDFFile(self.filename, 'a')  #Open existing file for append

        # Get the variables
        x = fid.variables['x']
        y = fid.variables['y']
        z = fid.variables['elevation']

        volumes = fid.variables['volumes']

        # Get X, Y and bed elevation Z
        Q = domain.quantities['elevation']
        X,Y,Z,V = Q.get_vertex_values(xy=True,
                                      precision=self.precision)

        #
        points = concatenate( (X[:,NewAxis],Y[:,NewAxis]), axis=1 )
        self.writer.store_triangulation(fid,
                                        points,
                                        V.astype(volumes.typecode()),
                                        Z,
                                        points_georeference= \
                                        domain.geo_reference)

        # Close
        fid.close()


    def store_timestep(self, names=None):
        """Store time and named quantities to file
        """

        from Scientific.IO.NetCDF import NetCDFFile
        import types
        from time import sleep
        from os import stat

        from Numeric import choose


        if names is None:
            # Standard shallow water wave equation quantities in ANUGA
            names = ['stage', 'xmomentum', 'ymomentum']

        # Get NetCDF
        retries = 0
        file_open = False
        while not file_open and retries < 10:
            try:
                fid = NetCDFFile(self.filename, 'a') # Open existing file
            except IOError:
                # This could happen if someone was reading the file.
                # In that case, wait a while and try again
                msg = 'Warning (store_timestep): File %s could not be opened'\
                      %self.filename
                msg += ' - trying step %s again' %self.domain.time
                print msg
                retries += 1
                sleep(1)
            else:
                file_open = True

        if not file_open:
            msg = 'File %s could not be opened for append' %self.filename
            raise DataFileNotOpenError, msg



        # Check to see if the file is already too big:
        time = fid.variables['time']
        i = len(time)+1
        file_size = stat(self.filename)[6]
        file_size_increase = file_size/i
        if file_size + file_size_increase > self.max_size*(2**self.recursion):
            # In order to get the file name and start time correct,
            # I change the domain.filename and domain.starttime.
            # This is the only way to do this without changing
            # other modules (I think).

            # Write a filename addon that won't break Swollen's reader
            # (10.sww is bad)
            filename_ext = '_time_%s'%self.domain.time
            filename_ext = filename_ext.replace('.', '_')

            # Remember the old filename, then give domain a
            # name with the extension
            old_domain_filename = self.domain.get_name()
            if not self.recursion:
                self.domain.set_name(old_domain_filename+filename_ext)


            # Change the domain starttime to the current time
            old_domain_starttime = self.domain.starttime
            self.domain.starttime = self.domain.time

            # Build a new data_structure.
            next_data_structure=\
                Data_format_sww(self.domain, mode=self.mode,\
                                max_size = self.max_size,\
                                recursion = self.recursion+1)
            if not self.recursion:
                print '    file_size = %s'%file_size
                print '    saving file to %s'%next_data_structure.filename
            #set up the new data_structure
            self.domain.writer = next_data_structure

            #FIXME - could be cleaner to use domain.store_timestep etc.
            next_data_structure.store_connectivity()
            next_data_structure.store_timestep(names)
            fid.sync()
            fid.close()

            #restore the old starttime and filename
            self.domain.starttime = old_domain_starttime
            self.domain.set_name(old_domain_filename)
        else:
            self.recursion = False
            domain = self.domain

            # Get the variables
            time = fid.variables['time']
            stage = fid.variables['stage']
            xmomentum = fid.variables['xmomentum']
            ymomentum = fid.variables['ymomentum']
            i = len(time)
            if type(names) not in [types.ListType, types.TupleType]:
                names = [names]

            if 'stage' in names and 'xmomentum' in names and \
               'ymomentum' in names:

                # Get stage, elevation, depth and select only those
                # values where minimum_storable_height is exceeded
                Q = domain.quantities['stage']
                A, _ = Q.get_vertex_values(xy = False,
                                           precision = self.precision)
                z = fid.variables['elevation']

                storable_indices = A-z[:] >= self.minimum_storable_height
                stage = choose(storable_indices, (z[:], A))

                # Define a zero vector of same size and type as A
                # for use with momenta
                null = zeros(size(A), A.typecode())

                # Get xmomentum where depth exceeds minimum_storable_height
                Q = domain.quantities['xmomentum']
                xmom, _ = Q.get_vertex_values(xy = False,
                                              precision = self.precision)
                xmomentum = choose(storable_indices, (null, xmom))


                # Get ymomentum where depth exceeds minimum_storable_height
                Q = domain.quantities['ymomentum']
                ymom, _ = Q.get_vertex_values(xy = False,
                                              precision = self.precision)
                ymomentum = choose(storable_indices, (null, ymom))

                # Write quantities to NetCDF
                self.writer.store_quantities(fid,
                                             time=self.domain.time,
                                             sww_precision=self.precision,
                                             stage=stage,
                                             xmomentum=xmomentum,
                                             ymomentum=ymomentum)
            else:
                msg = 'Quantities stored must be: stage, xmomentum, ymomentum.'
                msg += ' Instead I got: ' + str(names)
                raise Exception, msg



            # Update extrema if requested
            domain = self.domain
            if domain.quantities_to_be_monitored is not None:
                for q, info in domain.quantities_to_be_monitored.items():

                    if info['min'] is not None:
                        fid.variables[q + '.extrema'][0] = info['min']
                        fid.variables[q + '.min_location'][:] =\
                                        info['min_location']
                        fid.variables[q + '.min_time'][0] = info['min_time']

                    if info['max'] is not None:
                        fid.variables[q + '.extrema'][1] = info['max']
                        fid.variables[q + '.max_location'][:] =\
                                        info['max_location']
                        fid.variables[q + '.max_time'][0] = info['max_time']



            # Flush and close
            fid.sync()
            fid.close()



# Class for handling checkpoints data
class Data_format_cpt(Data_format):
    """Interface to native NetCDF format (.cpt)
    """


    def __init__(self, domain, mode = 'w'):
        from Scientific.IO.NetCDF import NetCDFFile
        from Numeric import Int, Float

        self.precision = Float #Use full precision

        Data_format.__init__(self, domain, 'cpt', mode)


        # NetCDF file definition
        fid = NetCDFFile(self.filename, mode)

        if mode == 'w':
            #Create new file
            fid.institution = 'Geoscience Australia'
            fid.description = 'Checkpoint data'
            #fid.smooth = domain.smooth
            fid.order = domain.default_order

            # dimension definitions
            fid.createDimension('number_of_volumes', self.number_of_volumes)
            fid.createDimension('number_of_vertices', 3)

            #Store info at all vertices (no smoothing)
            fid.createDimension('number_of_points', 3*self.number_of_volumes)
            fid.createDimension('number_of_timesteps', None) #extensible

            # variable definitions

            #Mesh
            fid.createVariable('x', self.precision, ('number_of_points',))
            fid.createVariable('y', self.precision, ('number_of_points',))


            fid.createVariable('volumes', Int, ('number_of_volumes',
                                                'number_of_vertices'))

            fid.createVariable('time', self.precision,
                               ('number_of_timesteps',))

            #Allocate space for all quantities
            for name in domain.quantities.keys():
                fid.createVariable(name, self.precision,
                                   ('number_of_timesteps',
                                    'number_of_points'))

        #Close
        fid.close()


    def store_checkpoint(self):
        """Write x,y coordinates and connectivity of the triangles
        constituting the mesh.
        """

        from Scientific.IO.NetCDF import NetCDFFile

        from Numeric import concatenate

        domain = self.domain

        #Get NetCDF
        fid = NetCDFFile(self.filename, 'a')  #Open existing file for append

        # Get the variables
        x = fid.variables['x']
        y = fid.variables['y']

        volumes = fid.variables['volumes']

        # Get X, Y and bed elevation Z
        Q = domain.quantities['elevation']
        X,Y,Z,V = Q.get_vertex_values(xy=True,
                      precision = self.precision)



        x[:] = X.astype(self.precision)
        y[:] = Y.astype(self.precision)
        # Note: elevation, like the other quantities, has its own
        # per-timestep variable and is stored via store_timestep

        volumes[:] = V

        #Close
        fid.close()



    def store_timestep(self, name):
        """Store time and named quantity to file
        """
        from Scientific.IO.NetCDF import NetCDFFile
        from time import sleep

        #Get NetCDF
        retries = 0
        file_open = False
        while not file_open and retries < 10:
            try:
                fid = NetCDFFile(self.filename, 'a') #Open existing file
            except IOError:
                #This could happen if someone was reading the file.
                #In that case, wait a while and try again
                msg = 'Warning (store_timestep): File %s could not be opened'\
                  %self.filename
                msg += ' - trying again'
                print msg
                retries += 1
                sleep(1)
            else:
                file_open = True

        if not file_open:
            msg = 'File %s could not be opened for append' %self.filename
            raise DataFileNotOpenError, msg


        domain = self.domain

        # Get the variables
        time = fid.variables['time']
        quantity_var = fid.variables[name]  # Variable for the named quantity
        i = len(time)

        #Store time
        time[i] = self.domain.time

        # Get quantity
        Q = domain.quantities[name]
        A,V = Q.get_vertex_values(xy=False,
                                  precision = self.precision)

        quantity_var[i,:] = A.astype(self.precision)

        #Flush and close
        fid.sync()
        fid.close()



#### NED is national exposure database (name changed to NEXIS)

LAT_TITLE = 'LATITUDE'
LONG_TITLE = 'LONGITUDE'
X_TITLE = 'x'
Y_TITLE = 'y'
class Exposure_csv:
    def __init__(self, file_name, latitude_title=LAT_TITLE,
                 longitude_title=LONG_TITLE, is_x_y_locations=None,
                 x_title=X_TITLE, y_title=Y_TITLE,
                 refine_polygon=None, title_check_list=None):
        """
        This class is for handling the exposure csv file.
        It reads the file in and converts the lats and longs to a geospatial
        data object.
        Use the methods to read and write columns.

        The format of the csv files it reads is:
           The first row is a title row.
           Commas are the delimiters.
           Each column is a 'set' of data.

        Feel free to use/expand it to read other csv files.

        It is not for adding and deleting rows.

        Can geospatial handle string attributes? It's not made for them.
        Currently it can't load and save string att's.

        So just use geospatial to hold the x, y and georef? Bad, since
        different att's are in different structures.  Not so bad, the info
        to write if the .csv file is saved is in attribute_dic

        The location info is in the geospatial attribute.
        """
        self._file_name = file_name
        self._geospatial = None #

        # self._attribute_dic is a dictionary.
        #The keys are the column titles.
        #The values are lists of column data

        # self._title_index_dic is a dictionary.
        #The keys are the column titles.
        #The values are the index positions of file columns.
        self._attribute_dic, self._title_index_dic = \
            csv2dict(self._file_name, title_check_list=title_check_list)
        try:
            #Have code here that handles caps or lower
            lats = self._attribute_dic[latitude_title]
            longs = self._attribute_dic[longitude_title]

        except KeyError:
            # maybe a warning..
            #Let's see if this works..
            if is_x_y_locations is not False:
                is_x_y_locations = True
            pass
        else:
            self._geospatial = Geospatial_data(latitudes = lats,
                 longitudes = longs)

        if is_x_y_locations is True:
            if self._geospatial is not None:
                pass #fixme throw an error
            try:
                xs = self._attribute_dic[x_title]
                ys = self._attribute_dic[y_title]
                points = [[float(i),float(j)] for i,j in map(None,xs,ys)]
            except KeyError:
                # maybe a warning..
                msg = "Could not find location information."
                raise TitleValueError, msg
            else:
                self._geospatial = Geospatial_data(data_points=points)

        # create a list of points that are in the refining_polygon
        # described by a list of indexes representing the points

    def __cmp__(self, other):
        #print "self._attribute_dic",self._attribute_dic
        #print "other._attribute_dic",other._attribute_dic
        #print "self._title_index_dic", self._title_index_dic
        #print "other._title_index_dic", other._title_index_dic

        #check that other is an instance of this class
        if isinstance(self, type(other)):
            result = cmp(self._attribute_dic, other._attribute_dic)
            if result != 0:
                return result
            # The order of the columns is important. Therefore..
            result = cmp(self._title_index_dic, other._title_index_dic)
            if result != 0:
                return result
            for self_ls, other_ls in map(None, self._attribute_dic, \
                    other._attribute_dic):
                result = cmp(self._attribute_dic[self_ls],
                             other._attribute_dic[other_ls])
                if result != 0:
                    return result
            return 0
        else:
            return 1


    def get_column(self, column_name, use_refind_polygon=False):
        """
        Given a column name return a list of the column values.

        Note, the type of the values will be String!
        Do this to change a list of strings to a list of floats:
        time = [float(x) for x in time]

        Not implemented:
        if use_refind_polygon is True, only return values in the
        refined polygon
        """
        if not self._attribute_dic.has_key(column_name):
            msg = 'There is no column called %s!' %column_name
            raise TitleValueError, msg
        return self._attribute_dic[column_name]


    def get_value(self, value_column_name,
                  known_column_name,
                  known_values,
                  use_refind_polygon=False):
        """
        Do linear interpolation on the known_column_name column, using the
        known_values, to return a value of the value_column_name column.
        """
        pass


    def get_location(self, use_refind_polygon=False):
        """
        Return a geospatial object which describes the
        locations of the location file.

        Note, if there is no location info, this returns None.

        Not implemented:
        if use_refind_polygon is True, only return values in the
        refined polygon
        """
        return self._geospatial

    def set_column(self, column_name, column_values, overwrite=False):
        """
        Add a column to the 'end' (with the right most column being the end)
        of the csv file.

        Set overwrite to True if you want to overwrite a column.

        Note, in column_name white space is removed and case is not checked.
        Precondition:
        The column_name and column_values cannot have commas in them.
        """

        value_row_count = \
            len(self._attribute_dic[self._title_index_dic.keys()[0]])
        if len(column_values) != value_row_count:
            msg = 'The number of column values must equal the number of rows.'
            raise DataMissingValuesError, msg

        if self._attribute_dic.has_key(column_name):
            if not overwrite:
                msg = 'Column name %s already in use!' %column_name
                raise TitleValueError, msg
        else:
            # New title.  Add it to the title index.
            self._title_index_dic[column_name] = len(self._title_index_dic)
        self._attribute_dic[column_name] = column_values
        #print "self._title_index_dic[column_name]",self._title_index_dic

    def save(self, file_name=None):
        """
        Save the exposure csv file
        """
        if file_name is None:
            file_name = self._file_name

        fd = open(file_name, 'wb')
        writer = csv.writer(fd)

        #Write the title to a csv file
        line = [None]* len(self._title_index_dic)
        for title in self._title_index_dic.iterkeys():
            line[self._title_index_dic[title]] = title
        writer.writerow(line)

        # Write the values to a csv file
        value_row_count = \
            len(self._attribute_dic[self._title_index_dic.keys()[0]])
        for row_i in range(value_row_count):
            line = [None]* len(self._title_index_dic)
            for title in self._title_index_dic.iterkeys():
                line[self._title_index_dic[title]] = \
                     self._attribute_dic[title][row_i]
            writer.writerow(line)

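# A minimal usage sketch for Exposure_csv (hypothetical file and column
# names; assumes the csv has LATITUDE and LONGITUDE title columns):
#
#     exposure = Exposure_csv('exposure.csv')
#     values = exposure.get_column('STRUCT_VALUE')
#     values = [float(x) for x in values]       # columns are read as strings
#     exposure.set_column('DAMAGE', ['0.0']*len(values))
#     exposure.save('exposure_with_damage.csv')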

def csv2array(file_name):
    """Convert CSV files of the form

    time, discharge, velocity
    0.0,  1.2,       0.0
    0.1,  3.2,       1.1
    ...

    to a dictionary of numeric arrays.


    See underlying function csv2dict for more details.
    """


    X, _ = csv2dict(file_name)

    Y = {}
    for key in X.keys():
        Y[key] = array([float(x) for x in X[key]])

    return Y


def csv2dict(file_name, title_check_list=None):
    """
    Load in the csv as a dictionary: title as key and column info as value.
    Also create a second dictionary: title as key and column index as value,
    to keep track of the column order.

    Two dictionaries are returned.

    WARNING: Values are returned as strings.
    Do this to change a list of strings to a list of floats:
        time = [float(x) for x in time]
    """

    #
    attribute_dic = {}
    title_index_dic = {}
    titles_stripped = [] # list of titles
    reader = csv.reader(file(file_name))

    # Read in and manipulate the title info
    titles = reader.next()
    for i, title in enumerate(titles):
        titles_stripped.append(title.strip())
        title_index_dic[title.strip()] = i
    title_count = len(titles_stripped)
    #print "title_index_dic",title_index_dic
    if title_check_list is not None:
        for title_check in title_check_list:
            #msg = "Reading error. Column '%s' is not present" % title_check
            #assert title_index_dic.has_key(title_check), msg
            if not title_index_dic.has_key(title_check):
                #reader.close()
                msg = "Reading error. Column '%s' is not present" \
                      % title_check
                raise IOError, msg



    #create a dic of column values, indexed by column title
    for line in reader:
        if len(line) != title_count:
            raise IOError #FIXME make this nicer
        for i, value in enumerate(line):
            attribute_dic.setdefault(titles_stripped[i],[]).append(value)

    return attribute_dic, title_index_dic

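# Example round trip through csv2dict/csv2array (hypothetical file
# 'timeseries.csv' with the layout shown in the csv2array docstring):
#
#     attributes, title_index = csv2dict('timeseries.csv')
#     time = [float(x) for x in attributes['time']]   # strings -> floats
#
#     arrays = csv2array('timeseries.csv')            # or get Numeric arrays
#     discharge = arrays['discharge']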

#Auxiliary
def write_obj(filename, x, y, z):
    """Store x,y,z vectors into filename (obj format)
       Vectors are assumed to have dimension (M,3) where
       M corresponds to the number of elements.
       Triangles are assumed to be disconnected

       The three numbers in each vector correspond to three vertices,

       e.g. the x coordinate of vertex 1 of element i is in x[i,1]
    """
    #print 'Writing obj to %s' % filename

    import os.path

    root, ext = os.path.splitext(filename)
    if ext == '.obj':
        FN = filename
    else:
        FN = filename + '.obj'


    outfile = open(FN, 'wb')
    outfile.write("# Triangulation as an obj file\n")

    M, N = x.shape
    assert N == 3  #Assuming three vertices per element

    for i in range(M):
        for j in range(N):
            outfile.write("v %f %f %f\n" % (x[i,j], y[i,j], z[i,j]))

    for i in range(M):
        base = i*N
        outfile.write("f %d %d %d\n" % (base+1, base+2, base+3))

    outfile.close()

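# Example: write a single triangle as an obj file (hypothetical data;
# each (M,3) array holds one coordinate per vertex per element):
#
#     from Numeric import array, Float
#     x = array([[0.0, 1.0, 0.0]], Float)
#     y = array([[0.0, 0.0, 1.0]], Float)
#     z = array([[0.0, 0.0, 0.0]], Float)
#     write_obj('single_triangle', x, y, z)   # creates single_triangle.obj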


#########################################################
#Conversion routines
########################################################

def sww2obj(basefilename, size):
    """Convert netcdf based data output to obj
    """
    from Scientific.IO.NetCDF import NetCDFFile

    from Numeric import Float, zeros

    #Get NetCDF
    FN = create_filename('.', basefilename, 'sww', size)
    print 'Reading from ', FN
    fid = NetCDFFile(FN, 'r')  #Open existing file for read


    # Get the variables
    x = fid.variables['x']
    y = fid.variables['y']
    z = fid.variables['elevation']
    time = fid.variables['time']
    stage = fid.variables['stage']

    M = size  #Number of lines
    xx = zeros((M,3), Float)
    yy = zeros((M,3), Float)
    zz = zeros((M,3), Float)

    for i in range(M):
        for j in range(3):
            xx[i,j] = x[i+j*M]
            yy[i,j] = y[i+j*M]
            zz[i,j] = z[i+j*M]

    #Write obj for bathymetry
    FN = create_filename('.', basefilename, 'obj', size)
    write_obj(FN, xx, yy, zz)


    #Now read all the data with variable information, combine with
    #x,y info and store as obj

    for k in range(len(time)):
        t = time[k]
        print 'Processing timestep %f' %t

        for i in range(M):
            for j in range(3):
                zz[i,j] = stage[k, i+j*M]


        #Write obj for variable data
        #FN = create_filename(basefilename, 'obj', size, time=t)
        FN = create_filename('.', basefilename[:5], 'obj', size, time=t)
        write_obj(FN, xx, yy, zz)



def dat2obj(basefilename):
    """Convert line based data output to obj
    FIXME: Obsolete?
    """

    import glob, os
    from anuga.config import data_dir


    #Get bathymetry and x,y's
    lines = open(data_dir+os.sep+basefilename+'_geometry.dat', 'r').readlines()

    from Numeric import zeros, Float

    M = len(lines)  #Number of lines
    x = zeros((M,3), Float)
    y = zeros((M,3), Float)
    z = zeros((M,3), Float)

    for i, line in enumerate(lines):
        tokens = line.split()
        values = map(float, tokens)

        for j in range(3):
            x[i,j] = values[j*3]
            y[i,j] = values[j*3+1]
            z[i,j] = values[j*3+2]


    #Write obj for bathymetry
    write_obj(data_dir+os.sep+basefilename+'_geometry', x, y, z)


    #Now read all the data files with variable information, combine with
    #x,y info and store as obj

    files = glob.glob(data_dir+os.sep+basefilename+'*.dat')

    for filename in files:
        print 'Processing %s' % filename

        # glob returns the full path, so don't prepend data_dir again
        lines = open(filename, 'r').readlines()
        assert len(lines) == M
        root, ext = os.path.splitext(filename)

        #Get time from filename
        i0 = filename.find('_time=')
        if i0 == -1:
            #Skip bathymetry file
            continue

        i0 += 6  #Position where time starts
        i1 = filename.find('.dat')

        if i1 > i0:
            t = float(filename[i0:i1])
        else:
            raise DataTimeError, 'Hmmmm'



        for i, line in enumerate(lines):
            tokens = line.split()
            values = map(float, tokens)

            for j in range(3):
                z[i,j] = values[j]

        #Write obj for variable data
        write_obj(data_dir+os.sep+basefilename+'_time=%.4f' %t, x, y, z)


def filter_netcdf(filename1, filename2, first=0, last=None, step=1):
    """Read netcdf filename1, pick timesteps first:step:last and save to
    netcdf file filename2
    """
    from Scientific.IO.NetCDF import NetCDFFile

    #Get NetCDF
    infile = NetCDFFile(filename1, 'r')  #Open existing file for read
    outfile = NetCDFFile(filename2, 'w')  #Open new file


    #Copy dimensions
    for d in infile.dimensions:
        outfile.createDimension(d, infile.dimensions[d])

    for name in infile.variables:
        var = infile.variables[name]
        outfile.createVariable(name, var.typecode(), var.dimensions)


    #Copy the static variables
    for name in infile.variables:
        if name == 'time' or name == 'stage':
            pass
        else:
            #Copy
            outfile.variables[name][:] = infile.variables[name][:]

    #Copy selected timesteps
    time = infile.variables['time']
    stage = infile.variables['stage']

    newtime = outfile.variables['time']
    newstage = outfile.variables['stage']

    if last is None:
        last = len(time)

    selection = range(first, last, step)
    for i, j in enumerate(selection):
        print 'Copying timestep %d of %d (%f)' %(j, last-first, time[j])
        newtime[i] = time[j]
        newstage[i,:] = stage[j,:]

    #Close
    infile.close()
    outfile.close()

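# Example: thin out an sww file, keeping every 5th of the first 100
# timesteps (hypothetical filenames):
#
#     filter_netcdf('run_full.sww', 'run_thinned.sww', first=0, last=100, step=5)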

#Get data objects
def get_dataobject(domain, mode='w'):
    """Return instance of class of given format using filename
    """

    cls = eval('Data_format_%s' %domain.format)
    return cls(domain, mode)

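# Example (hypothetical domain object): with domain.format == 'sww' this
# resolves to Data_format_sww and opens the output file for writing:
#
#     writer = get_dataobject(domain, mode='w')
#     writer.store_connectivity()
#     writer.store_timestep()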



def dem2pts(basename_in, basename_out=None,
            easting_min=None, easting_max=None,
            northing_min=None, northing_max=None,
            use_cache=False, verbose=False):
    """Read Digital Elevation model from the following NetCDF format (.dem)

    Example:

    ncols         3121
    nrows         1800
    xllcorner     722000
    yllcorner     5893000
    cellsize      25
    NODATA_value  -9999
    138.3698 137.4194 136.5062 135.5558 ..........

    Convert to NetCDF pts format which is

    points:  (Nx2) Float array
    elevation: N Float array
    """



    kwargs = {'basename_out': basename_out,
              'easting_min': easting_min,
              'easting_max': easting_max,
              'northing_min': northing_min,
              'northing_max': northing_max,
              'verbose': verbose}

    if use_cache is True:
        from caching import cache
        result = cache(_dem2pts, basename_in, kwargs,
                       dependencies = [basename_in + '.dem'],
                       verbose = verbose)

    else:
        result = apply(_dem2pts, [basename_in], kwargs)

    return result

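# Example: clip a DEM to a bounding box while converting it to pts
# (hypothetical basename 'perth_dem', i.e. reads perth_dem.dem and writes
# perth_dem.pts; coordinates are UTM eastings/northings):
#
#     dem2pts('perth_dem',
#             easting_min=722000, easting_max=740000,
#             northing_min=5893000, northing_max=5910000,
#             verbose=True)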

def _dem2pts(basename_in, basename_out=None, verbose=False,
            easting_min=None, easting_max=None,
            northing_min=None, northing_max=None):
    """Read Digital Elevation model from the following NetCDF format (.dem)

    Internal function. See public function dem2pts for details.
    """

    # FIXME: Can this be written feasibly using write_pts?

    import os
    from Scientific.IO.NetCDF import NetCDFFile
    from Numeric import Float, zeros, reshape, sum

    root = basename_in

    # Get NetCDF
    infile = NetCDFFile(root + '.dem', 'r')  # Open existing netcdf file for read

    if verbose: print 'Reading DEM from %s' %(root + '.dem')

    ncols = infile.ncols[0]
    nrows = infile.nrows[0]
    xllcorner = infile.xllcorner[0]  # Easting of lower left corner
    yllcorner = infile.yllcorner[0]  # Northing of lower left corner
    cellsize = infile.cellsize[0]
    NODATA_value = infile.NODATA_value[0]
    dem_elevation = infile.variables['elevation']

    zone = infile.zone[0]
    false_easting = infile.false_easting[0]
    false_northing = infile.false_northing[0]

    # Text strings
    projection = infile.projection
    datum = infile.datum
    units = infile.units


    # Get output file
    if basename_out is None:
        ptsname = root + '.pts'
    else:
        ptsname = basename_out + '.pts'

    if verbose: print 'Store to NetCDF file %s' %ptsname
    # NetCDF file definition
    outfile = NetCDFFile(ptsname, 'w')

    # Create new file
    outfile.institution = 'Geoscience Australia'
    outfile.description = 'NetCDF pts format for compact and portable storage ' +\
                          'of spatial point data'
    # Assign default values
    if easting_min is None: easting_min = xllcorner
    if easting_max is None: easting_max = xllcorner + ncols*cellsize
    if northing_min is None: northing_min = yllcorner
    if northing_max is None: northing_max = yllcorner + nrows*cellsize

    # Compute offsets to update georeferencing
    easting_offset = xllcorner - easting_min
    northing_offset = yllcorner - northing_min

    # Georeferencing
    outfile.zone = zone
    outfile.xllcorner = easting_min # Easting of lower left corner
    outfile.yllcorner = northing_min # Northing of lower left corner
    outfile.false_easting = false_easting
    outfile.false_northing = false_northing

    outfile.projection = projection
    outfile.datum = datum
    outfile.units = units


    # Grid info (FIXME: probably not going to be used, but heck)
    outfile.ncols = ncols
    outfile.nrows = nrows

    dem_elevation_r = reshape(dem_elevation, (nrows, ncols))
    totalnopoints = nrows*ncols

    # Calculating number of NODATA_values for each row in clipped region
    # FIXME: use array operations to do faster
    nn = 0
    k = 0
    i1_0 = 0
    j1_0 = 0
    thisj = 0
    thisi = 0
    for i in range(nrows):
        y = (nrows-i-1)*cellsize + yllcorner
        for j in range(ncols):
            x = j*cellsize + xllcorner
            if easting_min <= x <= easting_max and \
               northing_min <= y <= northing_max:
                thisj = j
                thisi = i
                if dem_elevation_r[i,j] == NODATA_value: nn += 1

                if k == 0:
                    i1_0 = i
                    j1_0 = j
                k += 1

    index1 = j1_0
    index2 = thisj

    # Dimension definitions
    nrows_in_bounding_box = int(round((northing_max-northing_min)/cellsize))
    ncols_in_bounding_box = int(round((easting_max-easting_min)/cellsize))

    clippednopoints = (thisi+1-i1_0)*(thisj+1-j1_0)
    nopoints = clippednopoints-nn

    clipped_dem_elev = dem_elevation_r[i1_0:thisi+1, j1_0:thisj+1]

    if verbose:
        print 'There are %d values in the elevation' %totalnopoints
        print 'There are %d values in the clipped elevation' %clippednopoints
        print 'There are %d NODATA_values in the clipped elevation' %nn

    outfile.createDimension('number_of_points', nopoints)
    outfile.createDimension('number_of_dimensions', 2) #This is 2d data

    # Variable definitions
    outfile.createVariable('points', Float, ('number_of_points',
                                             'number_of_dimensions'))
    outfile.createVariable('elevation', Float, ('number_of_points',))

    # Get handles to the variables
    points = outfile.variables['points']
    elevation = outfile.variables['elevation']

    lenv = index2-index1+1
    # Store data
    global_index = 0
    # for i in range(nrows):
    for i in range(i1_0, thisi+1, 1):
        if verbose and i%((nrows+10)/10)==0:
            print 'Processing row %d of %d' %(i, nrows)

        lower_index = global_index

        v = dem_elevation_r[i, index1:index2+1]
        no_NODATA = sum(v == NODATA_value)
        if no_NODATA > 0:
            newcols = lenv - no_NODATA # ncols_in_bounding_box - no_NODATA
        else:
            newcols = lenv # ncols_in_bounding_box

        telev = zeros(newcols, Float)
        tpoints = zeros((newcols, 2), Float)

        local_index = 0

        y = (nrows-i-1)*cellsize + yllcorner
        #for j in range(ncols):
        for j in range(j1_0, index2+1, 1):

            x = j*cellsize + xllcorner
            if easting_min <= x <= easting_max and \
               northing_min <= y <= northing_max and \
               dem_elevation_r[i,j] != NODATA_value:
                tpoints[local_index, :] = [x-easting_min, y-northing_min]
                telev[local_index] = dem_elevation_r[i, j]
                global_index += 1
                local_index += 1

        upper_index = global_index

        if upper_index == lower_index + newcols:
            points[lower_index:upper_index, :] = tpoints
            elevation[lower_index:upper_index] = telev

    assert global_index == nopoints, 'index not equal to number of points'

    infile.close()
    outfile.close()



def _read_hecras_cross_sections(lines):
    """Return block of surface lines for each cross section
    Starts with SURFACE LINE,
    Ends with END CROSS-SECTION
    """

    points = []

    reading_surface = False
    for i, line in enumerate(lines):

        if len(line.strip()) == 0:    #Ignore blanks
            continue

        if lines[i].strip().startswith('SURFACE LINE'):
            reading_surface = True
            continue

        if lines[i].strip().startswith('END') and reading_surface:
            yield points
            reading_surface = False
            points = []

        if reading_surface:
            fields = line.strip().split(',')
            easting = float(fields[0])
            northing = float(fields[1])
            elevation = float(fields[2])
            points.append([easting, northing, elevation])



1524
1525def hecras_cross_sections2pts(basename_in,
1526                              basename_out=None,
1527                              verbose=False):
1528    """Read HEC-RAS Elevation datal from the following ASCII format (.sdf)
1529
1530    Example:
1531
1532
1533# RAS export file created on Mon 15Aug2005 11:42
1534# by HEC-RAS Version 3.1.1
1535
1536BEGIN HEADER:
1537  UNITS: METRIC
1538  DTM TYPE: TIN
1539  DTM: v:\1\cit\perth_topo\river_tin
1540  STREAM LAYER: c:\local\hecras\21_02_03\up_canning_cent3d.shp
1541  CROSS-SECTION LAYER: c:\local\hecras\21_02_03\up_can_xs3d.shp
1542  MAP PROJECTION: UTM
1543  PROJECTION ZONE: 50
1544  DATUM: AGD66
1545  VERTICAL DATUM:
1546  NUMBER OF REACHES:  19
1547  NUMBER OF CROSS-SECTIONS:  14206
1548END HEADER:
1549
1550
1551Only the SURFACE LINE data of the following form will be utilised
1552
1553  CROSS-SECTION:
1554    STREAM ID:Southern-Wungong
1555    REACH ID:Southern-Wungong
1556    STATION:19040.*
1557    CUT LINE:
1558      405548.671603161 , 6438142.7594925
1559      405734.536092045 , 6438326.10404912
1560      405745.130459356 , 6438331.48627354
1561      405813.89633823 , 6438368.6272789
1562    SURFACE LINE:
1563     405548.67,   6438142.76,   35.37
1564     405552.24,   6438146.28,   35.41
1565     405554.78,   6438148.78,   35.44
1566     405555.80,   6438149.79,   35.44
1567     405559.37,   6438153.31,   35.45
1568     405560.88,   6438154.81,   35.44
1569     405562.93,   6438156.83,   35.42
1570     405566.50,   6438160.35,   35.38
1571     405566.99,   6438160.83,   35.37
1572     ...
1573   END CROSS-SECTION
1574
1575    Convert to NetCDF pts format which is
1576
1577    points:  (Nx2) Float array
1578    elevation: N Float array
1579    """
1580
1581    import os
1582    from Scientific.IO.NetCDF import NetCDFFile
1583    from Numeric import Float, zeros, reshape
1584
1585    root = basename_in
1586
1587    #Get ASCII file
1588    infile = open(root + '.sdf', 'r')  #Open SDF file for read
1589
1590    if verbose: print 'Reading DEM from %s' %(root + '.sdf')
1591
1592    lines = infile.readlines()
1593    infile.close()
1594
1595    if verbose: print 'Converting to pts format'
1596
1597    i = 0
1598    while lines[i].strip() == '' or lines[i].strip().startswith('#'):
1599        i += 1
1600
1601    assert lines[i].strip().upper() == 'BEGIN HEADER:'
1602    i += 1
1603
1604    assert lines[i].strip().upper().startswith('UNITS:')
1605    units = lines[i].strip().split()[1]
1606    i += 1
1607
1608    assert lines[i].strip().upper().startswith('DTM TYPE:')
1609    i += 1
1610
1611    assert lines[i].strip().upper().startswith('DTM:')
1612    i += 1
1613
1614    assert lines[i].strip().upper().startswith('STREAM')
1615    i += 1
1616
1617    assert lines[i].strip().upper().startswith('CROSS')
1618    i += 1
1619
1620    assert lines[i].strip().upper().startswith('MAP PROJECTION:')
1621    projection = lines[i].strip().split(':')[1]
1622    i += 1
1623
1624    assert lines[i].strip().upper().startswith('PROJECTION ZONE:')
1625    zone = int(lines[i].strip().split(':')[1])
1626    i += 1
1627
1628    assert lines[i].strip().upper().startswith('DATUM:')
1629    datum = lines[i].strip().split(':')[1]
1630    i += 1
1631
1632    assert lines[i].strip().upper().startswith('VERTICAL DATUM:')
1633    i += 1
1634
1635    assert lines[i].strip().upper().startswith('NUMBER OF REACHES:')
1636    i += 1
1637
1638    assert lines[i].strip().upper().startswith('NUMBER OF CROSS-SECTIONS:')
1639    number_of_cross_sections = int(lines[i].strip().split(':')[1])
1640    i += 1
1641
1642
1643    #Now read all points
1644    points = []
1645    elevation = []
1646    for j, entries in enumerate(_read_hecras_cross_sections(lines[i:])):
1647        for k, entry in enumerate(entries):
1648            points.append(entry[:2])
1649            elevation.append(entry[2])
1650
1651
1652    msg = 'Actual number of cross sections == %d, reported as %d'\
1653          %(j+1, number_of_cross_sections)
1654    assert j+1 == number_of_cross_sections, msg
1655
1656    #Get output file
1657    if basename_out is None:
1658        ptsname = root + '.pts'
1659    else:
1660        ptsname = basename_out + '.pts'
1661
1662    geo_ref = Geo_reference(zone, 0, 0, datum, projection, units)
1663    geo = Geospatial_data(points, {"elevation":elevation},
1664                          verbose=verbose, geo_reference=geo_ref)
1665    geo.export_points_file(ptsname)
1666
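# Example usage of the HEC-RAS conversion above (an illustrative sketch;
# 'river_tin' is a hypothetical basename, and the call assumes the
# enclosing converter is hecras_cross_sections2pts as defined above):
#
#     hecras_cross_sections2pts('river_tin', verbose=True)
#     # Reads river_tin.sdf and writes river_tin.pts
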
1667def export_grid(basename_in, extra_name_out = None,
1668                quantities = None, # defaults to elevation
1669                timestep = None,
1670                reduction = None,
1671                cellsize = 10,
1672                NODATA_value = -9999,
1673                easting_min = None,
1674                easting_max = None,
1675                northing_min = None,
1676                northing_max = None,
1677                verbose = False,
1678                origin = None,
1679                datum = 'WGS84',
1680                format = 'ers'):
1681    """
1682   
1683    Wrapper for sww2dem. - see sww2dem to find out what most of the
1684    parameters do.
1685
1686    Quantities is a list of quantities.  Each quantity will be
1687    calculated for each sww file.
1688
1689    This returns the basenames of the files returned, which is made up
1690    of the dir and all of the file name, except the extension.
1691
1692    This function returns the names of the files produced.
1693
1694    It will also produce as many output files as there are input sww files.
1695    """
1696   
1697    if quantities is None:
1698        quantities = ['elevation']
1699       
1700    if type(quantities) is str:
1701        quantities = [quantities]
1702
1703    # How many sww files are there?
1704    dir, base = os.path.split(basename_in)
1705    #print "basename_in",basename_in
1706    #print "base",base
1707
1708    iterate_over = get_all_swwfiles(dir,base,verbose)
1709   
1710    if dir == "":
1711        dir = "." # Unix compatibility
1712   
1713    files_out = []
1714    #print 'sww_file',iterate_over
1715    for sww_file in iterate_over:
1716        for quantity in quantities:
1717            if extra_name_out is None:
1718                basename_out = sww_file + '_' + quantity
1719            else:
1720                basename_out = sww_file + '_' + quantity + '_' \
1721                               + extra_name_out
1722#            print "basename_out", basename_out
1723       
1724            file_out = sww2dem(dir+sep+sww_file, dir+sep+basename_out,
1725                               quantity, 
1726                               timestep,
1727                               reduction,
1728                               cellsize,
1729                               NODATA_value,
1730                               easting_min,
1731                               easting_max,
1732                               northing_min,
1733                               northing_max,
1734                               verbose,
1735                               origin,
1736                               datum,
1737                               format)
1738            files_out.append(file_out)
1739    #print "basenames_out after",basenames_out
1740    return files_out
1741
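# Example usage of export_grid (illustrative sketch; 'karratha' is a
# hypothetical sww basename):
#
#     files = export_grid('karratha',
#                         quantities=['elevation', 'stage'],
#                         cellsize=25,
#                         format='asc')
#     # For every sww file matching the basename this produces e.g.
#     # karratha_elevation.asc and karratha_stage.asc, returning the
#     # basenames of all files written.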
1742
1743def get_timeseries(production_dirs, output_dir, scenario_name, gauges_dir_name,
1744                   plot_quantity, generate_fig = False,
1745                   reportname = None, surface = False, time_min = None,
1746                   time_max = None, title_on = False, verbose = True,
1747                   nodes=None):
1748    """
1749    nodes - number of processes used.
1750
1751    warning - this function has no tests
1752    """
1753    if reportname is None:
1754        report = False
1755    else:
1756        report = True
1757       
1758    if nodes is None:
1759        is_parallel = False
1760    else:
1761        is_parallel = True
1762       
1763    # Generate figures
1764    swwfiles = {}
1765   
1766    if is_parallel is True:   
1767        for i in range(nodes):
1768            print 'Sending node %d of %d' %(i,nodes)
1769            swwfiles = {}
1770            if reportname is not None:
1771                reportname = reportname + '_%s' %(i)
1772            for label_id in production_dirs.keys():
1773                if label_id == 'boundaries':
1774                    swwfile = best_boundary_sww
1775                else:
1776                    file_loc = output_dir + label_id + sep
1777                    sww_extra = '_P%s_%s' %(i,nodes)
1778                    swwfile = file_loc + scenario_name + sww_extra + '.sww'
1779                    print 'swwfile',swwfile
1780                swwfiles[swwfile] = label_id
1781
1782            # Call sww2timeseries once per node, after all of its
1783            # sww files have been collected
1784            texname, elev_output = sww2timeseries(swwfiles,
1785                                                  gauges_dir_name,
1786                                                  production_dirs,
1787                                                  report = report,
1788                                                  reportname = reportname,
1789                                                  plot_quantity = plot_quantity,
1790                                                  generate_fig = generate_fig,
1791                                                  surface = surface,
1792                                                  time_min = time_min,
1793                                                  time_max = time_max,
1794                                                  title_on = title_on,
1795                                                  verbose = verbose)
1794    else:   
1795        for label_id in production_dirs.keys():       
1796            if label_id == 'boundaries':
1797                print 'boundaries'
1798                file_loc = project.boundaries_in_dir
1799                swwfile = project.boundaries_dir_name3 + '.sww'
1800                #  swwfile = boundary_dir_filename
1801            else:
1802                file_loc = output_dir + label_id + sep
1803                swwfile = file_loc + scenario_name + '.sww'
1804            swwfiles[swwfile] = label_id
1805       
1806        texname, elev_output = sww2timeseries(swwfiles,
1807                                              gauges_dir_name,
1808                                              production_dirs,
1809                                              report = report,
1810                                              reportname = reportname,
1811                                              plot_quantity = plot_quantity,
1812                                              generate_fig = generate_fig,
1813                                              surface = surface,
1814                                              time_min = time_min,
1815                                              time_max = time_max,
1816                                              title_on = title_on,
1817                                              verbose = verbose)
1818                                         
1819
1820   
1821def sww2dem(basename_in, basename_out = None,
1822            quantity = None, # defaults to elevation
1823            timestep = None,
1824            reduction = None,
1825            cellsize = 10,
1826            NODATA_value = -9999,
1827            easting_min = None,
1828            easting_max = None,
1829            northing_min = None,
1830            northing_max = None,
1831            verbose = False,
1832            origin = None,
1833            datum = 'WGS84',
1834            format = 'ers'):
1835
1836    """Read SWW file and convert to Digitial Elevation model format
1837    (.asc or .ers)
1838
1839    Example (ASC):
1840
1841    ncols         3121
1842    nrows         1800
1843    xllcorner     722000
1844    yllcorner     5893000
1845    cellsize      25
1846    NODATA_value  -9999
1847    138.3698 137.4194 136.5062 135.5558 ..........
1848
1849    Also write accompanying file with same basename_out but extension .prj
1850    used to fix the UTM zone, datum, false northings and eastings.
1851
1852    The prj format is assumed to be as
1853
1854    Projection    UTM
1855    Zone          56
1856    Datum         WGS84
1857    Zunits        NO
1858    Units         METERS
1859    Spheroid      WGS84
1860    Xshift        0.0000000000
1861    Yshift        10000000.0000000000
1862    Parameters
1863
1864
1865    The parameter quantity must be the name of an existing quantity or
1866    an expression involving existing quantities. The default is
1867    'elevation'. Quantity is not a list of quantities.
1868
1869    If timestep (an index) is given, output quantity at that timestep.
1870
1871    If reduction is given, use that to reduce quantity over all timesteps.
1872
1873    datum is the geodetic datum for the output, e.g. 'WGS84' (default).
1874
1875    format can be either 'asc' or 'ers'.
1876    """
1877
1878    import sys
1879    from Numeric import array, Float, concatenate, NewAxis, zeros, reshape, \
1880         sometrue
1881    from Numeric import array2string
1882
1883    from anuga.utilities.polygon import inside_polygon, outside_polygon, \
1884         separate_points_by_polygon
1885    from anuga.abstract_2d_finite_volumes.util import \
1886         apply_expression_to_dictionary
1887
1888    msg = 'Format must be either asc or ers'
1889    assert format.lower() in ['asc', 'ers'], msg
1890
1891
1892    false_easting = 500000
1893    false_northing = 10000000
1894
1895    if quantity is None:
1896        quantity = 'elevation'
1897       
1898    if reduction is None:
1899        reduction = max
1900
1901    if basename_out is None:
1902        basename_out = basename_in + '_%s' %quantity
1903
1904    if quantity_formula.has_key(quantity):
1905        quantity = quantity_formula[quantity]
1906       
1907    swwfile = basename_in + '.sww'
1908    demfile = basename_out + '.' + format
1909    # Note the use of a .ers extension is optional (write_ermapper_grid will
1910    # deal with either option)
1911   
1913   
1914    # Read sww file
1915    if verbose: 
1916        print 'Reading from %s' %swwfile
1917        print 'Output directory is %s' %basename_out
1918   
1919    from Scientific.IO.NetCDF import NetCDFFile
1920    fid = NetCDFFile(swwfile)
1921
1922    #Get extent and reference
1923    x = fid.variables['x'][:]
1924    y = fid.variables['y'][:]
1925    volumes = fid.variables['volumes'][:]
1926    if timestep is not None:
1927        times = fid.variables['time'][timestep]
1928    else:
1929        times = fid.variables['time'][:]
1930
1931    number_of_timesteps = fid.dimensions['number_of_timesteps']
1932    number_of_points = fid.dimensions['number_of_points']
1933   
1934    if origin is None:
1935
1936        # Get geo_reference
1937        # sww files don't have to have a geo_ref
1938        try:
1939            geo_reference = Geo_reference(NetCDFObject=fid)
1940        except AttributeError, e:
1941            geo_reference = Geo_reference() # Default georef object
1942
1943        xllcorner = geo_reference.get_xllcorner()
1944        yllcorner = geo_reference.get_yllcorner()
1945        zone = geo_reference.get_zone()
1946    else:
1947        zone = origin[0]
1948        xllcorner = origin[1]
1949        yllcorner = origin[2]
1950
1951
1952
1953    # FIXME: Refactor using code from Interpolation_function.statistics
1954    # (in interpolate.py)
1955    # Something like print swwstats(swwname)
1956    if verbose:
1957        print '------------------------------------------------'
1958        print 'Statistics of SWW file:'
1959        print '  Name: %s' %swwfile
1960        print '  Reference:'
1961        print '    Lower left corner: [%f, %f]'\
1962              %(xllcorner, yllcorner)
1963        if timestep is not None:
1964            print '    Time: %f' %(times)
1965        else:
1966            print '    Start time: %f' %fid.starttime[0]
1967        print '  Extent:'
1968        print '    x [m] in [%f, %f], len(x) == %d'\
1969              %(min(x.flat), max(x.flat), len(x.flat))
1970        print '    y [m] in [%f, %f], len(y) == %d'\
1971              %(min(y.flat), max(y.flat), len(y.flat))
1972        if timestep is not None:
1973            print '    t [s] = %f, len(t) == %d' %(times, 1)
1974        else:
1975            print '    t [s] in [%f, %f], len(t) == %d'\
1976              %(min(times), max(times), len(times))
1977        print '  Quantities [SI units]:'
1978        # Comment out for reduced memory consumption
1979        for name in ['stage', 'xmomentum', 'ymomentum']:
1980            q = fid.variables[name][:].flat
1981            if timestep is not None:
1982                q = q[timestep*len(x):(timestep+1)*len(x)]
1983            if verbose: print '    %s in [%f, %f]' %(name, min(q), max(q))
1984        for name in ['elevation']:
1985            q = fid.variables[name][:].flat
1986            if verbose: print '    %s in [%f, %f]' %(name, min(q), max(q))
1987           
1988    # Get quantity and reduce if applicable
1989    if verbose: print 'Processing quantity %s' %quantity
1990
1991    # Turn NetCDF objects into Numeric arrays
1992    try:
1993        q = fid.variables[quantity][:]
1994    except:
1995        # Quantity is not a stored variable - treat it as an expression
1996        # and convert it to quantities found in the sww file
1997        quantity_dict = {}
1998        for name in fid.variables.keys():
1999            quantity_dict[name] = fid.variables[name][:]
2000        q = apply_expression_to_dictionary(quantity, quantity_dict)
2002    #print "q.shape",q.shape
2003    if len(q.shape) == 2:
2004        #q has a time component and needs to be reduced along
2005        #the temporal dimension
2006        if verbose: print 'Reducing quantity %s' %quantity
2007        q_reduced = zeros( number_of_points, Float )
2008       
2009        if timestep is not None:
2010            for k in range(number_of_points):
2011                q_reduced[k] = q[timestep,k] 
2012        else:
2013            for k in range(number_of_points):
2014                q_reduced[k] = reduction( q[:,k] )
2015
2016        q = q_reduced
2017
2018    #Post condition: Now q has dimension: number_of_points
2019    assert len(q.shape) == 1
2020    assert q.shape[0] == number_of_points
2021
2022    if verbose:
2023        print 'Processed values for %s are in [%f, %f]' %(quantity, min(q), max(q))
2024
2025
2026    #Create grid and update xll/yll corner and x,y
2027
2028    #Relative extent
2029    if easting_min is None:
2030        xmin = min(x)
2031    else:
2032        xmin = easting_min - xllcorner
2033
2034    if easting_max is None:
2035        xmax = max(x)
2036    else:
2037        xmax = easting_max - xllcorner
2038
2039    if northing_min is None:
2040        ymin = min(y)
2041    else:
2042        ymin = northing_min - yllcorner
2043
2044    if northing_max is None:
2045        ymax = max(y)
2046    else:
2047        ymax = northing_max - yllcorner
2048
2049
2050
2051    if verbose: print 'Creating grid'
2052    ncols = int((xmax-xmin)/cellsize)+1
2053    nrows = int((ymax-ymin)/cellsize)+1
2054
2055
2056    #New absolute reference and coordinates
2057    newxllcorner = xmin+xllcorner
2058    newyllcorner = ymin+yllcorner
2059
2060    x = x+xllcorner-newxllcorner
2061    y = y+yllcorner-newyllcorner
2062   
2063    vertex_points = concatenate ((x[:, NewAxis] ,y[:, NewAxis]), axis = 1)
2064    assert len(vertex_points.shape) == 2
2065
2066    grid_points = zeros ( (ncols*nrows, 2), Float )
2067
2068
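    # Assemble grid points in row-major order with k = i*ncols + j.
    # For 'asc' output the y of row i increases with i (south to north);
    # for 'ers' it is flipped so that row 0 is the northernmost row,
    # matching the row order each format expects on disk.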
2069    for i in xrange(nrows):
2070        if format.lower() == 'asc':
2071            yg = i*cellsize
2072        else:
2073            # This will flip the order of the y values for ers
2074            yg = (nrows-i)*cellsize
2075
2076        for j in xrange(ncols):
2077            xg = j*cellsize
2078            k = i*ncols + j
2079
2080            grid_points[k,0] = xg
2081            grid_points[k,1] = yg
2082
2083    #Interpolate
2084    from anuga.fit_interpolate.interpolate import Interpolate
2085
2086    # Remove loners from vertex_points, volumes here
2087    vertex_points, volumes = remove_lone_verts(vertex_points, volumes)
2088    #export_mesh_file('monkey.tsh',{'vertices':vertex_points, 'triangles':volumes})
2089    #import sys; sys.exit()
2090    interp = Interpolate(vertex_points, volumes, verbose = verbose)
2091
2092    #Interpolate using quantity values
2093    if verbose: print 'Interpolating'
2094    grid_values = interp.interpolate(q, grid_points).flat
2095
2096
2097    if verbose:
2098        print 'Interpolated values are in [%f, %f]' %(min(grid_values),
2099                                                      max(grid_values))
2100
2101    #Assign NODATA_value to all points outside bounding polygon (from interpolation mesh)
2102    P = interp.mesh.get_boundary_polygon()
2103    outside_indices = outside_polygon(grid_points, P, closed=True)
2104
2105    for i in outside_indices:
2106        grid_values[i] = NODATA_value
2107
2108
2109
2110
2111    if format.lower() == 'ers':
2112        # setup ERS header information
2113        grid_values = reshape(grid_values,(nrows, ncols))
2114        header = {}
2115        header['datum'] = '"' + datum + '"'
2116        # FIXME The use of hardwired UTM and zone number needs to be made optional
2117        # FIXME Also need an automatic test for coordinate type (i.e. EN or LL)
2118        header['projection'] = '"UTM-' + str(zone) + '"'
2119        header['coordinatetype'] = 'EN'
2120        if header['coordinatetype'] == 'LL':
2121            header['longitude'] = str(newxllcorner)
2122            header['latitude'] = str(newyllcorner)
2123        elif header['coordinatetype'] == 'EN':
2124            header['eastings'] = str(newxllcorner)
2125            header['northings'] = str(newyllcorner)
2126        header['nullcellvalue'] = str(NODATA_value)
2127        header['xdimension'] = str(cellsize)
2128        header['ydimension'] = str(cellsize)
2129        header['value'] = '"' + quantity + '"'
2130        #header['celltype'] = 'IEEE8ByteReal'  #FIXME: Breaks unit test
2131
2132
2133        #Write
2134        if verbose: print 'Writing %s' %demfile
2135        import ermapper_grids
2136        ermapper_grids.write_ermapper_grid(demfile, grid_values, header)
2137
2138        fid.close()
2139        return basename_out
2139    else:
2140        #Write to Ascii format
2141
2142        #Write prj file
2143        prjfile = basename_out + '.prj'
2144
2145        if verbose: print 'Writing %s' %prjfile
2146        prjid = open(prjfile, 'w')
2147        prjid.write('Projection    %s\n' %'UTM')
2148        prjid.write('Zone          %d\n' %zone)
2149        prjid.write('Datum         %s\n' %datum)
2150        prjid.write('Zunits        NO\n')
2151        prjid.write('Units         METERS\n')
2152        prjid.write('Spheroid      %s\n' %datum)
2153        prjid.write('Xshift        %d\n' %false_easting)
2154        prjid.write('Yshift        %d\n' %false_northing)
2155        prjid.write('Parameters\n')
2156        prjid.close()
2157
2158
2159
2160        if verbose: print 'Writing %s' %demfile
2161
2162        ascid = open(demfile, 'w')
2163
2164        ascid.write('ncols         %d\n' %ncols)
2165        ascid.write('nrows         %d\n' %nrows)
2166        ascid.write('xllcorner     %d\n' %newxllcorner)
2167        ascid.write('yllcorner     %d\n' %newyllcorner)
2168        ascid.write('cellsize      %f\n' %cellsize)
2169        ascid.write('NODATA_value  %d\n' %NODATA_value)
2170
2171
2172        #Get bounding polygon from mesh
2173        #P = interp.mesh.get_boundary_polygon()
2174        #inside_indices = inside_polygon(grid_points, P)
2175
2176        for i in range(nrows):
2177            if verbose and i%((nrows+10)/10)==0:
2178                print 'Doing row %d of %d' %(i, nrows)
2179
2180            base_index = (nrows-i-1)*ncols
2181
2182            slice = grid_values[base_index:base_index+ncols]
2183            s = array2string(slice, max_line_width=sys.maxint)
2184            ascid.write(s[1:-1] + '\n')
2185
2186
2187            #print
2188            #for j in range(ncols):
2189            #    index = base_index+j#
2190            #    print grid_values[index],
2191            #    ascid.write('%f ' %grid_values[index])
2192            #ascid.write('\n')
2193
2194        #Close
2195        ascid.close()
2196        fid.close()
2197        return basename_out
2198
2199
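# Example usage of sww2dem (illustrative sketch; 'karratha' is a
# hypothetical sww basename). quantity may be an expression in existing
# quantities, e.g. water depth as 'stage-elevation':
#
#     sww2dem('karratha',
#             basename_out='karratha_depth',
#             quantity='stage-elevation',
#             reduction=max,
#             cellsize=25,
#             format='asc')
#     # Writes karratha_depth.asc and an accompanying karratha_depth.prj
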
2200#Backwards compatibility
2201def sww2asc(basename_in, basename_out = None,
2202            quantity = None,
2203            timestep = None,
2204            reduction = None,
2205            cellsize = 10,
2206            verbose = False,
2207            origin = None):
2208    print 'sww2asc will soon be obsoleted - please use sww2dem'
2209    sww2dem(basename_in,
2210            basename_out = basename_out,
2211            quantity = quantity,
2212            timestep = timestep,
2213            reduction = reduction,
2214            cellsize = cellsize,
2215            verbose = verbose,
2216            origin = origin,
2217            datum = 'WGS84',
2218            format = 'asc')
2219
2220def sww2ers(basename_in, basename_out = None,
2221            quantity = None,
2222            timestep = None,
2223            reduction = None,
2224            cellsize = 10,
2225            verbose = False,
2226            origin = None,
2227            datum = 'WGS84'):
2228    print 'sww2ers will soon be obsoleted - please use sww2dem'
2229    sww2dem(basename_in,
2230            basename_out = basename_out,
2231            quantity = quantity,
2232            timestep = timestep,
2233            reduction = reduction,
2234            cellsize = cellsize,
2235            verbose = verbose,
2236            origin = origin,
2237            datum = datum,
2238            format = 'ers')
2239################################# END COMPATIBILITY ##############
2240
2241
2242
2243def sww2pts(basename_in, basename_out=None,
2244            data_points=None,
2245            quantity=None,
2246            timestep=None,
2247            reduction=None,
2248            NODATA_value=-9999,
2249            verbose=False,
2250            origin=None):
2251            #datum = 'WGS84')
2252
2253
2254    """Read SWW file and convert to interpolated values at selected points
2255
2256    The parameter quantity' must be the name of an existing quantity or
2257    an expression involving existing quantities. The default is
2258    'elevation'.
2259
2260    if timestep (an index) is given, output quantity at that timestep
2261
2262    if reduction is given use that to reduce quantity over all timesteps.
2263
2264    data_points (Nx2 array) give locations of points where quantity is to be computed.
2265   
2266    """
2267
2268    import sys
2269    from Numeric import array, Float, concatenate, NewAxis, zeros, reshape, sometrue
2270    from Numeric import array2string
2271
2272    from anuga.utilities.polygon import inside_polygon, outside_polygon, separate_points_by_polygon
2273    from anuga.abstract_2d_finite_volumes.util import apply_expression_to_dictionary
2274
2275    from anuga.geospatial_data.geospatial_data import Geospatial_data
2276
2277    if quantity is None:
2278        quantity = 'elevation'
2279
2280    if reduction is None:
2281        reduction = max
2282
2283    if basename_out is None:
2284        basename_out = basename_in + '_%s' %quantity
2285
2286    swwfile = basename_in + '.sww'
2287    ptsfile = basename_out + '.pts'
2288
2289    # Read sww file
2290    if verbose: print 'Reading from %s' %swwfile
2291    from Scientific.IO.NetCDF import NetCDFFile
2292    fid = NetCDFFile(swwfile)
2293
2294    # Get extent and reference
2295    x = fid.variables['x'][:]
2296    y = fid.variables['y'][:]
2297    volumes = fid.variables['volumes'][:]
2298
2299    number_of_timesteps = fid.dimensions['number_of_timesteps']
2300    number_of_points = fid.dimensions['number_of_points']
2301    if origin is None:
2302
2303        # Get geo_reference
2304        # sww files don't have to have a geo_ref
2305        try:
2306            geo_reference = Geo_reference(NetCDFObject=fid)
2307        except AttributeError, e:
2308            geo_reference = Geo_reference() #Default georef object
2309
2310        xllcorner = geo_reference.get_xllcorner()
2311        yllcorner = geo_reference.get_yllcorner()
2312        zone = geo_reference.get_zone()
2313    else:
2314        zone = origin[0]
2315        xllcorner = origin[1]
2316        yllcorner = origin[2]
2317
2318
2319
2320    # FIXME: Refactor using code from file_function.statistics
2321    # Something like print swwstats(swwname)
2322    if verbose:
2323        x = fid.variables['x'][:]
2324        y = fid.variables['y'][:]
2325        times = fid.variables['time'][:]
2326        print '------------------------------------------------'
2327        print 'Statistics of SWW file:'
2328        print '  Name: %s' %swwfile
2329        print '  Reference:'
2330        print '    Lower left corner: [%f, %f]'\
2331              %(xllcorner, yllcorner)
2332        print '    Start time: %f' %fid.starttime[0]
2333        print '  Extent:'
2334        print '    x [m] in [%f, %f], len(x) == %d'\
2335              %(min(x.flat), max(x.flat), len(x.flat))
2336        print '    y [m] in [%f, %f], len(y) == %d'\
2337              %(min(y.flat), max(y.flat), len(y.flat))
2338        print '    t [s] in [%f, %f], len(t) == %d'\
2339              %(min(times), max(times), len(times))
2340        print '  Quantities [SI units]:'
2341        for name in ['stage', 'xmomentum', 'ymomentum', 'elevation']:
2342            q = fid.variables[name][:].flat
2343            print '    %s in [%f, %f]' %(name, min(q), max(q))
2344
2345
2346
2347    # Get quantity and reduce if applicable
2348    if verbose: print 'Processing quantity %s' %quantity
2349
2350    # Turn NetCDF objects into Numeric arrays
2351    quantity_dict = {}
2352    for name in fid.variables.keys():
2353        quantity_dict[name] = fid.variables[name][:]
2354
2355
2356
2357    # Convert quantity expression to quantities found in sww file   
2358    q = apply_expression_to_dictionary(quantity, quantity_dict)
2359
2360
2361
2362    if len(q.shape) == 2:
2363        # q has a time component and needs to be reduced along
2364        # the temporal dimension
2365        if verbose: print 'Reducing quantity %s' %quantity
2366        q_reduced = zeros( number_of_points, Float )
2367
2368        for k in range(number_of_points):
2369            q_reduced[k] = reduction( q[:,k] )
2370
2371        q = q_reduced
2372
2373    # Post condition: Now q has dimension: number_of_points
2374    assert len(q.shape) == 1
2375    assert q.shape[0] == number_of_points
2376
2377
2378    if verbose:
2379        print 'Processed values for %s are in [%f, %f]' %(quantity, min(q), max(q))
2380
2381
2382    # Create grid and update xll/yll corner and x,y
2383    vertex_points = concatenate ((x[:, NewAxis] ,y[:, NewAxis]), axis = 1)
2384    assert len(vertex_points.shape) == 2
2385
2386    # Interpolate
2387    from anuga.fit_interpolate.interpolate import Interpolate
2388    interp = Interpolate(vertex_points, volumes, verbose = verbose)
2389
2390    # Interpolate using quantity values
2391    if verbose: print 'Interpolating'
2392    interpolated_values = interp.interpolate(q, data_points).flat
2393
2394
2395    if verbose:
2396        print 'Interpolated values are in [%f, %f]' %(min(interpolated_values),
2397                                                      max(interpolated_values))
2398
2399    # Assign NODATA_value to all points outside bounding polygon (from interpolation mesh)
2400    P = interp.mesh.get_boundary_polygon()
2401    outside_indices = outside_polygon(data_points, P, closed=True)
2402
2403    for i in outside_indices:
2404        interpolated_values[i] = NODATA_value
2405
2406
2407    # Store results   
2408    G = Geospatial_data(data_points=data_points,
2409                        attributes=interpolated_values)
2410
2411    G.export_points_file(ptsfile, absolute = True)
2412
2413    fid.close()
2414
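# Example usage of sww2pts (illustrative sketch; the basename and gauge
# coordinates are hypothetical):
#
#     from Numeric import array
#     gauges = array([[305000.0, 6185000.0],
#                     [306000.0, 6186000.0]])
#     sww2pts('karratha', data_points=gauges, quantity='elevation')
#     # Writes the interpolated elevations at the two gauge locations
#     # to karratha_elevation.pts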
2415
2416def convert_dem_from_ascii2netcdf(basename_in, basename_out = None,
2417                                  use_cache = False,
2418                                  verbose = False):
2419    """Read Digitial Elevation model from the following ASCII format (.asc)
2420
2421    Example:
2422
2423    ncols         3121
2424    nrows         1800
2425    xllcorner     722000
2426    yllcorner     5893000
2427    cellsize      25
2428    NODATA_value  -9999
2429    138.3698 137.4194 136.5062 135.5558 ..........
2430
2431    Convert basename_in + '.asc' to NetCDF format (.dem)
2432    mimicking the ASCII format closely.
2433
2434
2435    An accompanying file with same basename_in but extension .prj must exist
2436    and is used to fix the UTM zone, datum, false northings and eastings.
2437
2438    The prj format is assumed to be as
2439
2440    Projection    UTM
2441    Zone          56
2442    Datum         WGS84
2443    Zunits        NO
2444    Units         METERS
2445    Spheroid      WGS84
2446    Xshift        0.0000000000
2447    Yshift        10000000.0000000000
2448    Parameters
2449    """
2450
2451
2452
2453    kwargs = {'basename_out': basename_out, 'verbose': verbose}
2454
2455    if use_cache is True:
2456        from caching import cache
2457        result = cache(_convert_dem_from_ascii2netcdf, basename_in, kwargs,
2458                       dependencies = [basename_in + '.asc',
2459                                       basename_in + '.prj'],
2460                       verbose = verbose)
2461
2462    else:
2463        result = _convert_dem_from_ascii2netcdf(basename_in, **kwargs)
2464
2465    return result
2466
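# Example usage of convert_dem_from_ascii2netcdf (illustrative sketch;
# 'topo' is a hypothetical basename with topo.asc and topo.prj present):
#
#     convert_dem_from_ascii2netcdf('topo', use_cache=True, verbose=True)
#     # Writes topo.dem; with use_cache=True the cached result is reused
#     # until topo.asc or topo.prj change.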
2467
2468
2469
2470
2471
2472def _convert_dem_from_ascii2netcdf(basename_in, basename_out = None,
2473                                  verbose = False):
2474    """Read Digitial Elevation model from the following ASCII format (.asc)
2475
2476    Internal function. See public function convert_dem_from_ascii2netcdf for details.
2477    """
2478
2479    import os
2480    from Scientific.IO.NetCDF import NetCDFFile
2481    from Numeric import Float, array
2482
2483    #root, ext = os.path.splitext(basename_in)
2484    root = basename_in
2485
2486    ###########################################
2487    # Read Meta data
2488    if verbose: print 'Reading METADATA from %s' %(root + '.prj')
2489    metadatafile = open(root + '.prj')
2490    metalines = metadatafile.readlines()
2491    metadatafile.close()
2492
2493    L = metalines[0].strip().split()
2494    assert L[0].strip().lower() == 'projection'
2495    projection = L[1].strip()                   #TEXT
2496
2497    L = metalines[1].strip().split()
2498    assert L[0].strip().lower() == 'zone'
2499    zone = int(L[1].strip())
2500
2501    L = metalines[2].strip().split()
2502    assert L[0].strip().lower() == 'datum'
2503    datum = L[1].strip()                        #TEXT
2504
2505    L = metalines[3].strip().split()
2506    assert L[0].strip().lower() == 'zunits'     #IGNORE
2507    zunits = L[1].strip()                       #TEXT
2508
2509    L = metalines[4].strip().split()
2510    assert L[0].strip().lower() == 'units'
2511    units = L[1].strip()                        #TEXT
2512
2513    L = metalines[5].strip().split()
2514    assert L[0].strip().lower() == 'spheroid'   #IGNORE
2515    spheroid = L[1].strip()                     #TEXT
2516
2517    L = metalines[6].strip().split()
2518    assert L[0].strip().lower() == 'xshift'
2519    false_easting = float(L[1].strip())
2520
2521    L = metalines[7].strip().split()
2522    assert L[0].strip().lower() == 'yshift'
2523    false_northing = float(L[1].strip())
2524
2525    #print false_easting, false_northing, zone, datum
2526
2527
2528    ###########################################
2529    #Read DEM data
2530
2531    datafile = open(basename_in + '.asc')
2532
2533    if verbose: print 'Reading DEM from %s' %(basename_in + '.asc')
2534    lines = datafile.readlines()
2535    datafile.close()
2536
2537    if verbose: print 'Got', len(lines), ' lines'
2538
2539    ncols = int(lines[0].split()[1].strip())
2540    nrows = int(lines[1].split()[1].strip())
2541
2542    # Do cellsize (line 4) before line 2 and 3
2543    cellsize = float(lines[4].split()[1].strip())   
2544
2545    # Checks suggested by Joaquim Luis
2546    # Our internal representation of xllcorner
2547    # and yllcorner is non-standard.
2548    xref = lines[2].split()
2549    if xref[0].strip() == 'xllcorner':
2550        xllcorner = float(xref[1].strip()) # + 0.5*cellsize # Correct offset
2551    elif xref[0].strip() == 'xllcenter':
2552        xllcorner = float(xref[1].strip())
2553    else:
2554        msg = 'Unknown keyword: %s' %xref[0].strip()
2555        raise Exception, msg
2556
2557    yref = lines[3].split()
2558    if yref[0].strip() == 'yllcorner':
2559        yllcorner = float(yref[1].strip()) # + 0.5*cellsize # Correct offset
2560    elif yref[0].strip() == 'yllcenter':
2561        yllcorner = float(yref[1].strip())
2562    else:
2563        msg = 'Unknown keyword: %s' %yref[0].strip()
2564        raise Exception, msg
2565       
2566
2567    NODATA_value = int(lines[5].split()[1].strip())
2568
2569    assert len(lines) == nrows + 6
2570
2571
2572    ##########################################
2573
2574
2575    if basename_out is None:
2576        netcdfname = root + '.dem'
2577    else:
2578        netcdfname = basename_out + '.dem'
2579
2580    if verbose: print 'Store to NetCDF file %s' %netcdfname
2581    # NetCDF file definition
2582    fid = NetCDFFile(netcdfname, 'w')
2583
2584    #Create new file
2585    fid.institution = 'Geoscience Australia'
2586    fid.description = 'NetCDF DEM format for compact and portable storage ' +\
2587                      'of spatial point data'
2588
2589    fid.ncols = ncols
2590    fid.nrows = nrows
2591    fid.xllcorner = xllcorner
2592    fid.yllcorner = yllcorner
2593    fid.cellsize = cellsize
2594    fid.NODATA_value = NODATA_value
2595
2596    fid.zone = zone
2597    fid.false_easting = false_easting
2598    fid.false_northing = false_northing
2599    fid.projection = projection
2600    fid.datum = datum
2601    fid.units = units
2602
2603
2604    # dimension definitions
2605    fid.createDimension('number_of_rows', nrows)
2606    fid.createDimension('number_of_columns', ncols)
2607
2608    # variable definitions
2609    fid.createVariable('elevation', Float, ('number_of_rows',
2610                                            'number_of_columns'))
2611
2612    # Get handles to the variables
2613    elevation = fid.variables['elevation']
2614
2615    #Store data
2616    n = len(lines[6:])
2617    for i, line in enumerate(lines[6:]):
2618        fields = line.split()
2619        if verbose and i%((n+10)/10)==0:
2620            print 'Processing row %d of %d' %(i, nrows)
2621
2622        elevation[i, :] = array([float(x) for x in fields])
2623
2624    fid.close()
2625
2626
2627
2628
2629
2630def ferret2sww(basename_in, basename_out = None,
2631               verbose = False,
2632               minlat = None, maxlat = None,
2633               minlon = None, maxlon = None,
2634               mint = None, maxt = None, mean_stage = 0,
2635               origin = None, zscale = 1,
2636               fail_on_NaN = True,
2637               NaN_filler = 0,
2638               elevation = None,
2639               inverted_bathymetry = True
2640               ): #FIXME: Bathymetry should be obtained
2641                                  #from MOST somehow.
2642                                  #Alternatively from elsewhere
2643                                  #or, as a last resort,
2644                                  #specified here.
2645                                  #The value of -100 will work
2646                                  #for the Wollongong tsunami
2647                                  #scenario but is very hacky
2648    """Convert MOST and 'Ferret' NetCDF format for wave propagation to
2649    sww format native to abstract_2d_finite_volumes.
2650
2651    Specify only basename_in and read files of the form
2652    basefilename_ha.nc, basefilename_ua.nc, basefilename_va.nc containing
2653    relative height, x-velocity and y-velocity, respectively.
2654
2655    Also convert latitude and longitude to UTM. All coordinates are
2656    assumed to be given in the GDA94 datum.
2657
2658    min's and max's: If omitted - full extent is used.
2659    To include a value min may equal it, while max must exceed it.
2660    Lat and lon are assumed to be in decimal degrees.
2661
2662    origin is a 3-tuple with geo referenced
2663    UTM coordinates (zone, easting, northing)
2664
2665    nc format has values organised as HA[TIME, LATITUDE, LONGITUDE]
2666    which means that longitude is the fastest
2667    varying dimension (row major order, so to speak)
2668
2669    ferret2sww uses grid points as vertices in a triangular grid
2670    counting vertices from lower left corner upwards, then right
2671    """
2672
2673    import os
2674    from Scientific.IO.NetCDF import NetCDFFile
2675    from Numeric import Float, Int, Int32, searchsorted, zeros, array
2676    from Numeric import allclose, around
2677
2678    precision = Float
2679
2680    msg = 'Must use latitudes and longitudes for minlat, maxlon etc'
2681
2682    if minlat is not None:
2683        assert -90 < minlat < 90 , msg
2684    if maxlat is not None:
2685        assert -90 < maxlat < 90 , msg
2686        if minlat is not None:
2687            assert maxlat > minlat
2688    if minlon is not None:
2689        assert -180 < minlon < 180 , msg
2690    if maxlon is not None:
2691        assert -180 < maxlon < 180 , msg
2692        if minlon is not None:
2693            assert maxlon > minlon
2694       
2695
2696
2697    #Get NetCDF data
2698    if verbose: print 'Reading files %s_*.nc' %basename_in
2699    #print "basename_in + '_ha.nc'",basename_in + '_ha.nc'
2700    file_h = NetCDFFile(basename_in + '_ha.nc', 'r') #Wave amplitude (cm)
2701    file_u = NetCDFFile(basename_in + '_ua.nc', 'r') #Velocity (x) (cm/s)
2702    file_v = NetCDFFile(basename_in + '_va.nc', 'r') #Velocity (y) (cm/s)
2703    file_e = NetCDFFile(basename_in + '_e.nc', 'r')  #Elevation (z) (m)
2704
2705    if basename_out is None:
2706        swwname = basename_in + '.sww'
2707    else:
2708        swwname = basename_out + '.sww'
2709
2710    # Get dimensions of file_h
2711    for dimension in file_h.dimensions.keys():
2712        if dimension[:3] == 'LON':
2713            dim_h_longitude = dimension
2714        if dimension[:3] == 'LAT':
2715            dim_h_latitude = dimension
2716        if dimension[:4] == 'TIME':
2717            dim_h_time = dimension
2718
2719#    print 'long:', dim_h_longitude
2720#    print 'lats:', dim_h_latitude
2721#    print 'times:', dim_h_time
2722
2723    times = file_h.variables[dim_h_time]
2724    latitudes = file_h.variables[dim_h_latitude]
2725    longitudes = file_h.variables[dim_h_longitude]
2726
2731    # get dimensions for file_e
2732    for dimension in file_e.dimensions.keys():
2733        if dimension[:3] == 'LON':
2734            dim_e_longitude = dimension
2735        if dimension[:3] == 'LAT':
2736            dim_e_latitude = dimension
2737
2738    # get dimensions for file_u
2739    for dimension in file_u.dimensions.keys():
2740        if dimension[:3] == 'LON':
2741            dim_u_longitude = dimension
2742        if dimension[:3] == 'LAT':
2743            dim_u_latitude = dimension
2744        if dimension[:4] == 'TIME':
2745            dim_u_time = dimension
2746           
2747    # get dimensions for file_v
2748    for dimension in file_v.dimensions.keys():
2749        if dimension[:3] == 'LON':
2750            dim_v_longitude = dimension
2751        if dimension[:3] == 'LAT':
2752            dim_v_latitude = dimension
2753        if dimension[:4] == 'TIME':
2754            dim_v_time = dimension
2755
2756
2757    # Precision used by most for lat/lon is 4 or 5 decimals
2758    e_lat = around(file_e.variables[dim_e_latitude][:], 5)
2759    e_lon = around(file_e.variables[dim_e_longitude][:], 5)
2760
2761    # Check that files are compatible
2762    assert allclose(latitudes, file_u.variables[dim_u_latitude])
2763    assert allclose(latitudes, file_v.variables[dim_v_latitude])
2764    assert allclose(latitudes, e_lat)
2765
2766    assert allclose(longitudes, file_u.variables[dim_u_longitude])
2767    assert allclose(longitudes, file_v.variables[dim_v_longitude])
2768    assert allclose(longitudes, e_lon)
2769
2770    if mint is None:
2771        jmin = 0
2772        mint = times[0]       
2773    else:
2774        jmin = searchsorted(times, mint)
2775       
2776    if maxt is None:
2777        jmax = len(times)
2778        maxt = times[-1]
2779    else:
2780        jmax = searchsorted(times, maxt)
2781
2782    #print "latitudes[:]",latitudes[:]
2783    #print "longitudes[:]",longitudes [:]
2784    kmin, kmax, lmin, lmax = _get_min_max_indexes(latitudes[:],
2785                                                  longitudes[:],
2786                                                  minlat, maxlat,
2787                                                  minlon, maxlon)
2788
2789
2790    times = times[jmin:jmax]
2791    latitudes = latitudes[kmin:kmax]
2792    longitudes = longitudes[lmin:lmax]
2793
2794    #print "latitudes[:]",latitudes[:]
2795    #print "longitudes[:]",longitudes [:]
2796
2797    if verbose: print 'cropping'
2798    zname = 'ELEVATION'
2799
2800    amplitudes = file_h.variables['HA'][jmin:jmax, kmin:kmax, lmin:lmax]
2801    uspeed = file_u.variables['UA'][jmin:jmax, kmin:kmax, lmin:lmax] #Lon
2802    vspeed = file_v.variables['VA'][jmin:jmax, kmin:kmax, lmin:lmax] #Lat
2803    elevations = file_e.variables[zname][kmin:kmax, lmin:lmax]
2804
2805    #    if latitudes2[0]==latitudes[0] and latitudes2[-1]==latitudes[-1]:
2806    #        elevations = file_e.variables['ELEVATION'][kmin:kmax, lmin:lmax]
2807    #    elif latitudes2[0]==latitudes[-1] and latitudes2[-1]==latitudes[0]:
2808    #        from Numeric import asarray
2809    #        elevations=elevations.tolist()
2810    #        elevations.reverse()
2811    #        elevations=asarray(elevations)
2812    #    else:
2813    #        from Numeric import asarray
2814    #        elevations=elevations.tolist()
2815    #        elevations.reverse()
2816    #        elevations=asarray(elevations)
2817    #        'print hmmm'
2818
2819
2820
2821    #Get missing values
2822    nan_ha = file_h.variables['HA'].missing_value[0]
2823    nan_ua = file_u.variables['UA'].missing_value[0]
2824    nan_va = file_v.variables['VA'].missing_value[0]
2825    if hasattr(file_e.variables[zname],'missing_value'):
2826        nan_e  = file_e.variables[zname].missing_value[0]
2827    else:
2828        nan_e = None
2829
2830    #Cleanup
2831    from Numeric import sometrue
2832
2833    missing = (amplitudes == nan_ha)
2834    if sometrue (missing):
2835        if fail_on_NaN:
2836            msg = 'NetCDFFile %s contains missing values'\
2837                  %(basename_in+'_ha.nc')
2838            raise DataMissingValuesError, msg
2839        else:
2840            amplitudes = amplitudes*(missing==0) + missing*NaN_filler
2841
2842    missing = (uspeed == nan_ua)
2843    if sometrue (missing):
2844        if fail_on_NaN:
2845            msg = 'NetCDFFile %s contains missing values'\
2846                  %(basename_in+'_ua.nc')
2847            raise DataMissingValuesError, msg
2848        else:
2849            uspeed = uspeed*(missing==0) + missing*NaN_filler
2850
2851    missing = (vspeed == nan_va)
2852    if sometrue (missing):
2853        if fail_on_NaN:
2854            msg = 'NetCDFFile %s contains missing values'\
2855                  %(basename_in+'_va.nc')
2856            raise DataMissingValuesError, msg
2857        else:
2858            vspeed = vspeed*(missing==0) + missing*NaN_filler
2859
2860
2861    missing = (elevations == nan_e)
2862    if sometrue (missing):
2863        if fail_on_NaN:
2864            msg = 'NetCDFFile %s contains missing values'\
2865                  %(basename_in+'_e.nc')
2866            raise DataMissingValuesError, msg
2867        else:
2868            elevations = elevations*(missing==0) + missing*NaN_filler
2869
2870    #######
2871
2872
2873
2874    number_of_times = times.shape[0]
2875    number_of_latitudes = latitudes.shape[0]
2876    number_of_longitudes = longitudes.shape[0]
2877
2878    assert amplitudes.shape[0] == number_of_times
2879    assert amplitudes.shape[1] == number_of_latitudes
2880    assert amplitudes.shape[2] == number_of_longitudes
2881
2882    if verbose:
2883        print '------------------------------------------------'
2884        print 'Statistics:'
2885        print '  Extent (lat/lon):'
2886        print '    lat in [%f, %f], len(lat) == %d'\
2887              %(min(latitudes.flat), max(latitudes.flat),
2888                len(latitudes.flat))
2889        print '    lon in [%f, %f], len(lon) == %d'\
2890              %(min(longitudes.flat), max(longitudes.flat),
2891                len(longitudes.flat))
2892        print '    t in [%f, %f], len(t) == %d'\
2893              %(min(times.flat), max(times.flat), len(times.flat))
2894
2895        q = amplitudes.flat
2896        name = 'Amplitudes (ha) [cm]'
2897        print '    %s in [%f, %f]' %(name, min(q), max(q))
2898
2899        q = uspeed.flat
2900        name = 'Speeds (ua) [cm/s]'
2901        print '    %s in [%f, %f]' %(name, min(q), max(q))
2902
2903        q = vspeed.flat
2904        name = 'Speeds (va) [cm/s]'
2905        print '    %s in [%f, %f]' %(name, min(q), max(q))
2906
2907        q = elevations.flat
2908        name = 'Elevations (e) [m]'
2909        print '    %s in [%f, %f]' %(name, min(q), max(q))
2910
2911
2912    # print number_of_latitudes, number_of_longitudes
2913    number_of_points = number_of_latitudes*number_of_longitudes
2914    number_of_volumes = (number_of_latitudes-1)*(number_of_longitudes-1)*2
2915
2916
2917    file_h.close()
2918    file_u.close()
2919    file_v.close()
2920    file_e.close()
2921
2922
2923    # NetCDF file definition
2924    outfile = NetCDFFile(swwname, 'w')
2925
2926    description = 'Converted from Ferret files: %s, %s, %s, %s'\
2927                  %(basename_in + '_ha.nc',
2928                    basename_in + '_ua.nc',
2929                    basename_in + '_va.nc',
2930                    basename_in + '_e.nc')
2931   
2932    # Create new file
2933    starttime = times[0]
2934   
2935    sww = Write_sww()
2936    sww.store_header(outfile, times, number_of_volumes,
2937                     number_of_points, description=description,
2938                     verbose=verbose,sww_precision=Float)
2939
2940    # Store
2941    from anuga.coordinate_transforms.redfearn import redfearn
2942    x = zeros(number_of_points, Float)  #Easting
2943    y = zeros(number_of_points, Float)  #Northing
2944
2945
2946    if verbose: print 'Making triangular grid'
2947
2948    # Check zone boundaries
2949    refzone, _, _ = redfearn(latitudes[0],longitudes[0])
2950
2951    vertices = {}
2952    i = 0
2953    for k, lat in enumerate(latitudes):       #Y direction
2954        for l, lon in enumerate(longitudes):  #X direction
2955
2956            vertices[l,k] = i
2957
2958            zone, easting, northing = redfearn(lat,lon)
2959
2960            msg = 'Zone boundary crossed at longitude = %f' %lon
2961            #assert zone == refzone, msg
2962            #print '%7.2f %7.2f %8.2f %8.2f' %(lon, lat, easting, northing)
2963            x[i] = easting
2964            y[i] = northing
2965            i += 1
2966
2967    #Construct 2 triangles per 'rectangular' element
2968    volumes = []
2969    for l in range(number_of_longitudes-1):    #X direction
2970        for k in range(number_of_latitudes-1): #Y direction
2971            v1 = vertices[l,k+1]
2972            v2 = vertices[l,k]
2973            v3 = vertices[l+1,k+1]
2974            v4 = vertices[l+1,k]
2975
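            # Vertex layout for this grid cell (l = longitude index,
            # k = latitude index); the diagonal v2-v3 splits it into the
            # two triangles appended below:
            #
            #   v1 ---- v3
            #   |     / |
            #   |   /   |
            #   v2 ---- v4
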
2976            volumes.append([v1,v2,v3]) #Upper element
2977            volumes.append([v4,v3,v2]) #Lower element
2978
2979    volumes = array(volumes)
2980
2981    if origin is None:
2982        origin = Geo_reference(refzone,min(x),min(y))
2983    geo_ref = write_NetCDF_georeference(origin, outfile)
2984   
2985    if elevation is not None:
2986        z = elevation
2987    else:
2988        if inverted_bathymetry:
2989            z = -1*elevations
2990        else:
2991            z = elevations
2992    #FIXME: z should be obtained from MOST and passed in here
2993
2994    #FIXME use the Write_sww instance(sww) to write this info
2995    from Numeric import resize
2996    z = resize(z,outfile.variables['z'][:].shape)
2997    outfile.variables['x'][:] = x - geo_ref.get_xllcorner()
2998    outfile.variables['y'][:] = y - geo_ref.get_yllcorner()
2999    outfile.variables['z'][:] = z             #FIXME HACK for bacwards compat.
3000    outfile.variables['elevation'][:] = z
3001    outfile.variables['volumes'][:] = volumes.astype(Int32) #For Opteron 64
3002
3003
3004
3005    #Time stepping
3006    stage = outfile.variables['stage']
3007    xmomentum = outfile.variables['xmomentum']
3008    ymomentum = outfile.variables['ymomentum']
3009
3010    if verbose: print 'Converting quantities'
3011    n = len(times)
3012    for j in range(n):
3013        if verbose and j%((n+10)/10)==0: print '  Doing %d of %d' %(j, n)
3014        i = 0
3015        for k in range(number_of_latitudes):      #Y direction
3016            for l in range(number_of_longitudes): #X direction
3017                w = zscale*amplitudes[j,k,l]/100 + mean_stage
3018                stage[j,i] = w
3019                h = w - z[i]
3020                xmomentum[j,i] = uspeed[j,k,l]/100*h
3021                ymomentum[j,i] = vspeed[j,k,l]/100*h
3022                i += 1
3023
3024    #outfile.close()
3025
3026    #FIXME: Refactor using code from file_function.statistics
3027    #Something like print swwstats(swwname)
3028    if verbose:
3029        x = outfile.variables['x'][:]
3030        y = outfile.variables['y'][:]
3031        print '------------------------------------------------'
3032        print 'Statistics of output file:'
3033        print '  Name: %s' %swwname
3034        print '  Reference:'
3035        print '    Lower left corner: [%f, %f]'\
3036              %(geo_ref.get_xllcorner(), geo_ref.get_yllcorner())
3037        print '    Start time: %f' %starttime
3038        print '    Min time: %f' %mint
3039        print '    Max time: %f' %maxt
3040        print '  Extent:'
3041        print '    x [m] in [%f, %f], len(x) == %d'\
3042              %(min(x.flat), max(x.flat), len(x.flat))
3043        print '    y [m] in [%f, %f], len(y) == %d'\
3044              %(min(y.flat), max(y.flat), len(y.flat))
3045        print '    t [s] in [%f, %f], len(t) == %d'\
3046              %(min(times), max(times), len(times))
3047        print '  Quantities [SI units]:'
3048        for name in ['stage', 'xmomentum', 'ymomentum', 'elevation']:
3049            q = outfile.variables[name][:].flat
3050            print '    %s in [%f, %f]' %(name, min(q), max(q))
3051
3052
3053
3054    outfile.close()
3055
3056
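# Example usage of ferret2sww (illustrative sketch; 'most_output' is a
# hypothetical basename with most_output_ha.nc, most_output_ua.nc,
# most_output_va.nc and most_output_e.nc present):
#
#     ferret2sww('most_output',
#                basename_out='most_boundary',
#                minlat=-21.0, maxlat=-20.0,
#                minlon=116.0, maxlon=117.0,
#                mean_stage=0.0,
#                verbose=True)
#     # Writes most_boundary.sww cropped to the given lat/lon window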
3057
3058
3059
3060def timefile2netcdf(filename, quantity_names=None, time_as_seconds=False):
3061    """Template for converting typical text files with time series to
3062    NetCDF tms file.
3063
3064
3065    The file format is assumed to be either two fields separated by a comma:
3066
3067        time [DD/MM/YY hh:mm:ss], value0 value1 value2 ...
3068
3069    E.g
3070
3071      31/08/04 00:00:00, 1.328223 0 0
3072      31/08/04 00:15:00, 1.292912 0 0
3073
3074    or time (seconds), value0 value1 value2 ...
3075   
3076      0.0, 1.328223 0 0
3077      0.1, 1.292912 0 0
3078
3079    will provide a time dependent function f(t) with three attributes
3080
3081    filename is assumed to be the rootname, with extensions .txt (input)
3082    and .tms (output)
3082    """
3083
3084    import time, calendar
3085    from Numeric import array
3086    from anuga.config import time_format
3087    from anuga.utilities.numerical_tools import ensure_numeric
3088
3089
3090
3091    fid = open(filename + '.txt')
3092    line = fid.readline()
3093    fid.close()
3094
3095    fields = line.split(',')
3096    msg = 'File %s must have the format: time, value0 value1 value2 ...' %filename
3097    assert len(fields) == 2, msg
3098
3099    if not time_as_seconds:
3100        try:
3101            starttime = calendar.timegm(time.strptime(fields[0], time_format))
3102        except ValueError:
3103            msg = 'First field in file %s must be' %filename
3104            msg += ' date-time with format %s.\n' %time_format
3105            msg += 'I got %s instead.' %fields[0]
3106            raise DataTimeError, msg
3107    else:
3108        try:
3109            starttime = float(fields[0])
3110        except ValueError:
3111            msg = 'Bad time format'
3112            raise DataTimeError, msg
3113
3114
3115    #Split values
3116    values = []
3117    for value in fields[1].split():
3118        values.append(float(value))
3119
3120    q = ensure_numeric(values)
3121
3122    msg = 'ERROR: File must contain at least one independent value'
3123    assert len(q.shape) == 1, msg
3124
3125
3126
3127    #Read times proper
3128    from Numeric import zeros, Float, alltrue
3129    from anuga.config import time_format
3130    import time, calendar
3131
3132    fid = open(filename + '.txt')
3133    lines = fid.readlines()
3134    fid.close()
3135
3136    N = len(lines)
3137    d = len(q)
3138
3139    T = zeros(N, Float)       #Time
3140    Q = zeros((N, d), Float)  #Values
3141
3142    for i, line in enumerate(lines):
3143        fields = line.split(',')
3144        if not time_as_seconds:
3145            realtime = calendar.timegm(time.strptime(fields[0], time_format))
3146        else:
3147            realtime = float(fields[0])
3148        T[i] = realtime - starttime
3149
3150        for j, value in enumerate(fields[1].split()):
3151            Q[i, j] = float(value)
3152
3153    msg = 'File %s must list time as a monotonically ' %filename
3154    msg += 'increasing sequence'
3155    assert alltrue( T[1:] - T[:-1] > 0 ), msg
3156
3157    #Create NetCDF file
3158    from Scientific.IO.NetCDF import NetCDFFile
3159
3160    fid = NetCDFFile(filename + '.tms', 'w')
3161
3162
3163    fid.institution = 'Geoscience Australia'
3164    fid.description = 'Time series'
3165
3166
3167    #Reference point
3168    #Start time in seconds since the epoch (midnight 1/1/1970)
3169    #FIXME: Use Georef
3170    fid.starttime = starttime
3171
3172    # dimension definitions
3173    #fid.createDimension('number_of_volumes', self.number_of_volumes)
3174    #fid.createDimension('number_of_vertices', 3)
3175
3176
3177    fid.createDimension('number_of_timesteps', len(T))
3178
3179    fid.createVariable('time', Float, ('number_of_timesteps',))
3180
3181    fid.variables['time'][:] = T
3182
3183    for i in range(Q.shape[1]):
3184        try:
3185            name = quantity_names[i]
3186        except (TypeError, IndexError):
3187            # quantity_names is None or has too few entries
3188            name = 'Attribute%d' %i
3188
3189        fid.createVariable(name, Float, ('number_of_timesteps',))
3190        fid.variables[name][:] = Q[:,i]
3191
3192    fid.close()
3193
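# Example usage of timefile2netcdf (illustrative sketch; 'tide_gauge' is
# a hypothetical rootname with tide_gauge.txt in the format documented
# above):
#
#     timefile2netcdf('tide_gauge',
#                     quantity_names=['stage', 'xmomentum', 'ymomentum'])
#     # Writes tide_gauge.tms with one NetCDF variable per value column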
3194
3195def extent_sww(file_name):
3196    """
3197    Read in an sww file.
3198
3199    Input;
3200    file_name - the sww file
3201
3202    Output;
3203    z - Vector of bed elevation
3204    volumes - Array.  Each row has 3 values, representing
3205    the vertices that define the volume
3206    time - Vector of the times where there is stage information
3207    stage - array with respect to time and vertices (x,y)
3208    """
3209
3210
3211    from Scientific.IO.NetCDF import NetCDFFile
3212
3213    #Check contents
3214    #Get NetCDF
3215    fid = NetCDFFile(file_name, 'r')
3216
3217    # Get the variables
3218    x = fid.variables['x'][:]
3219    y = fid.variables['y'][:]
3220    stage = fid.variables['stage'][:]
3221    #print "stage",stage
3222    #print "stage.shap",stage.shape
3223    #print "min(stage.flat), mpythonax(stage.flat)",min(stage.flat), max(stage.flat)
3224    #print "min(stage)",min(stage)
3225
3226    fid.close()
3227
3228    return [min(x),max(x),min(y),max(y),min(stage.flat),max(stage.flat)]
3229
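# Example use of extent_sww (a sketch; 'model.sww' is a hypothetical file):
#
#     xmin, xmax, ymin, ymax, stage_min, stage_max = extent_sww('model.sww')
#     print 'x in [%f, %f], stage in [%f, %f]' %(xmin, xmax, stage_min, stage_max)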
3230
3231def sww2domain(filename, boundary=None, t=None,
3232               fail_if_NaN=True ,NaN_filler=0,
3233               verbose = False, very_verbose = False):
3234    """
3235    Usage: domain = sww2domain('file.sww',t=time (default = last time in file))
3236
3237    Passing a boundary is not recommended unless domain.smooth was
3238    selected, as this function uses unique coordinates, but not unique
3239    boundaries. This means that the boundary will not be compatible
3240    with the coordinates, and will give a different final boundary, or crash.
3241    """
3242    NaN = 9.969209968386869e+036
3243    # Default NetCDF fill value, used here to flag missing data
3244
3245    from Scientific.IO.NetCDF import NetCDFFile
3246    from shallow_water import Domain
3247    from Numeric import asarray, transpose, resize
3248
3249    if verbose: print 'Reading from ', filename
3250    fid = NetCDFFile(filename, 'r')    #Open existing file for read
3251    time = fid.variables['time']       #Timesteps
3252    if t is None:
3253        t = time[-1]
3254    time_interp = get_time_interp(time,t)
3255
3256    # Get the variables as Numeric arrays
3257    x = fid.variables['x'][:]             #x-coordinates of vertices
3258    y = fid.variables['y'][:]             #y-coordinates of vertices
3259    elevation = fid.variables['elevation']     #Elevation
3260    stage = fid.variables['stage']     #Water level
3261    xmomentum = fid.variables['xmomentum']   #Momentum in the x-direction
3262    ymomentum = fid.variables['ymomentum']   #Momentum in the y-direction
3263
3264    starttime = fid.starttime[0]
3265    volumes = fid.variables['volumes'][:] #Connectivity
3266    coordinates = transpose(asarray([x.tolist(),y.tolist()]))
3267    #FIXME (Ole): Something like this might be better: concatenate( (x, y), axis=1 )
3268    # or concatenate( (x[:,NewAxis],x[:,NewAxis]), axis=1 )
3269
3270    conserved_quantities = []
3271    interpolated_quantities = {}
3272    other_quantities = []
3273
3274    # get geo_reference
3275    #sww files don't have to have a geo_ref
3276    try:
3277        geo_reference = Geo_reference(NetCDFObject=fid)
3278    except AttributeError:
3279        geo_reference = None
3280
3281    if verbose: print '    getting quantities'
3282    for quantity in fid.variables.keys():
3283        dimensions = fid.variables[quantity].dimensions
3284        if 'number_of_timesteps' in dimensions:
3285            conserved_quantities.append(quantity)
3286            interpolated_quantities[quantity]=\
3287                  interpolated_quantity(fid.variables[quantity][:],time_interp)
3288        else: other_quantities.append(quantity)
3289
3290    other_quantities.remove('x')
3291    other_quantities.remove('y')
3292    other_quantities.remove('z')
3293    other_quantities.remove('volumes')
3294    try:
3295        other_quantities.remove('stage_range')
3296        other_quantities.remove('xmomentum_range')
3297        other_quantities.remove('ymomentum_range')
3298        other_quantities.remove('elevation_range')
3299    except ValueError:
3300        pass # the *_range variables are not present in older sww files
3301       
3302
3303    conserved_quantities.remove('time')
3304
3305    if verbose: print '    building domain'
3306    #    From domain.Domain:
3307    #    domain = Domain(coordinates, volumes,\
3308    #                    conserved_quantities = conserved_quantities,\
3309    #                    other_quantities = other_quantities,zone=zone,\
3310    #                    xllcorner=xllcorner, yllcorner=yllcorner)
3311
3312    #   From shallow_water.Domain:
3313    coordinates=coordinates.tolist()
3314    volumes=volumes.tolist()
3315    #FIXME:should this be in mesh?(peter row)
3316    if fid.smoothing == 'Yes': unique = False
3317    else: unique = True
3318    if unique:
3319        coordinates,volumes,boundary=weed(coordinates,volumes,boundary)
3320
3321
3322    try:
3323        domain = Domain(coordinates, volumes, boundary)
3324    except AssertionError, e:
3325        fid.close()
3326        msg = 'Domain could not be created: %s. Perhaps use "fail_if_NaN=False and NaN_filler = ..."' %e
3327        raise DataDomainError, msg
3328
3329    if boundary is not None:
3330        domain.boundary = boundary
3331
3332    domain.geo_reference = geo_reference
3333
3334    domain.starttime=float(starttime)+float(t)
3335    domain.time=0.0
3336
3337    for quantity in other_quantities:
3338        try:
3339            NaN = fid.variables[quantity].missing_value
3340        except AttributeError:
3341            pass # quantity has no missing_value number
3342        X = fid.variables[quantity][:]
3343        if very_verbose:
3344            print '       ',quantity
3345            print '        NaN =',NaN
3346            print '        max(X)'
3347            print '       ',max(X)
3348            print '        max(X)==NaN'
3349            print '       ',max(X)==NaN
3350            print ''
3351        if (max(X)==NaN) or (min(X)==NaN):
3352            if fail_if_NaN:
3353                msg = 'quantity "%s" contains no_data entry'%quantity
3354                raise DataMissingValuesError, msg
3355            else:
3356                data = (X != NaN)
3357                X = (X*data)+(data==0)*NaN_filler
3358        if unique:
3359            X = resize(X,(len(X)/3,3))
3360        domain.set_quantity(quantity,X)
3361    #
3362    for quantity in conserved_quantities:
3363        try:
3364            NaN = fid.variables[quantity].missing_value
3365        except AttributeError:
3366            pass # quantity has no missing_value number
3367        X = interpolated_quantities[quantity]
3368        if very_verbose:
3369            print '       ',quantity
3370            print '        NaN =',NaN
3371            print '        max(X)'
3372            print '       ',max(X)
3373            print '        max(X)==NaN'
3374            print '       ',max(X)==NaN
3375            print ''
3376        if (max(X)==NaN) or (min(X)==NaN):
3377            if fail_if_NaN:
3378                msg = 'quantity "%s" contains no_data entry'%quantity
3379                raise DataMissingValuesError, msg
3380            else:
3381                data = (X != NaN)
3382                X = (X*data)+(data==0)*NaN_filler
3383        if unique:
3384            X = resize(X,(X.shape[0]/3,3))
3385        domain.set_quantity(quantity,X)
3386
3387    fid.close()
3388    return domain
3389
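# Example use of sww2domain (a sketch; 'model.sww' is a hypothetical file):
#
#     domain = sww2domain('model.sww', t=3600.0,
#                         fail_if_NaN=False, NaN_filler=0.0,
#                         verbose=True)
#     # domain.starttime is the sww starttime plus the requested t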
3390
3391def interpolated_quantity(saved_quantity,time_interp):
3392
3393    #given an index and ratio, interpolate quantity with respect to time.
3394    index,ratio = time_interp
3395    Q = saved_quantity
3396    if ratio > 0:
3397        q = (1-ratio)*Q[index]+ ratio*Q[index+1]
3398    else:
3399        q = Q[index]
3400    #Return vector of interpolated values
3401    return q
3402
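# Worked example: with time_interp == (1, 0.5), the result is halfway
# between the quantity at timesteps 1 and 2:
#
#     from Numeric import array
#     Q = array([[0.0, 0.0], [2.0, 4.0], [4.0, 8.0]])
#     q = interpolated_quantity(Q, (1, 0.5))    # [3.0, 6.0]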
3403
3404def get_time_interp(time,t=None):
3405    #Finds the ratio and index for time interpolation.
3406    #It is borrowed from previous abstract_2d_finite_volumes code.
3407    if t is None:
3408        t=time[-1]
3409        index = -1
3410        ratio = 0.
3411    else:
3412        T = time
3413        tau = t
3414        index=0
3415        msg = 'Time interval derived from file %s [%s:%s]'\
3416            %('FIXMEfilename', T[0], T[-1])
3417        msg += ' does not match model time: %s' %tau
3418        if tau < time[0]: raise DataTimeError, msg
3419        if tau > time[-1]: raise DataTimeError, msg
3420        while tau > time[index]: index += 1
3421        while tau < time[index]: index -= 1
3422        if tau == time[index]:
3423            #Protect against case where tau == time[-1] (last time)
3424            # - also works in general when tau == time[i]
3425            ratio = 0
3426        else:
3427            #t is now between index and index+1
3428            ratio = (tau - time[index])/(time[index+1] - time[index])
3429    return (index,ratio)
3430
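# Worked example: for stored times [0., 10., 20.] and model time
# t == 15.0, get_time_interp returns (1, 0.5), i.e. halfway between
# time[1] and time[2]:
#
#     index, ratio = get_time_interp(ensure_numeric([0.0, 10.0, 20.0]), 15.0)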
3431
3432def weed(coordinates,volumes,boundary = None):
3433    if type(coordinates)==ArrayType:
3434        coordinates = coordinates.tolist()
3435    if type(volumes)==ArrayType:
3436        volumes = volumes.tolist()
3437
3438    unique = False
3439    point_dict = {}
3440    same_point = {}
3441    for i in range(len(coordinates)):
3442        point = tuple(coordinates[i])
3443        if point_dict.has_key(point):
3444            unique = True
3445            same_point[i]=point
3446            #to change all point i references to point j
3447        else:
3448            point_dict[point]=i
3449            same_point[i]=point
3450
3451    coordinates = []
3452    i = 0
3453    for point in point_dict.keys():
3454        point = tuple(point)
3455        coordinates.append(list(point))
3456        point_dict[point]=i
3457        i+=1
3458
3459
3460    for volume in volumes:
3461        for i in range(len(volume)):
3462            index = volume[i]
3463            if index>-1:
3464                volume[i]=point_dict[same_point[index]]
3465
3466    new_boundary = {}
3467    if boundary is not None:
3468        for segment in boundary.keys():
3469            point0 = point_dict[same_point[segment[0]]]
3470            point1 = point_dict[same_point[segment[1]]]
3471            label = boundary[segment]
3472            #FIXME should the boundary attributes be concatenated
3473            #('exterior, pond') or replaced ('pond')(peter row)
3474
3475            if new_boundary.has_key((point0,point1)):
3476                new_boundary[(point0,point1)]=new_boundary[(point0,point1)]#\
3477                                              #+','+label
3478
3479            elif new_boundary.has_key((point1,point0)):
3480                new_boundary[(point1,point0)]=new_boundary[(point1,point0)]#\
3481                                              #+','+label
3482            else: new_boundary[(point0,point1)]=label
3483
3484        boundary = new_boundary
3485
3486    return coordinates,volumes,boundary
3487
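# Example: weed() merges duplicated vertices and remaps triangle and
# boundary indices accordingly (a sketch; two triangles sharing an edge,
# stored with each point repeated as in an unsmoothed sww):
#
#     coords = [[0,0], [1,0], [0,1], [1,0], [0,1], [1,1]]
#     vols = [[0,1,2], [3,5,4]]
#     coords, vols, _ = weed(coords, vols)
#     # len(coords) == 4 and vols now index the 4 unique points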
3488
3489def decimate_dem(basename_in, stencil, cellsize_new, basename_out=None,
3490                 verbose=False):
3491    """Read Digital Elevation Model from the following NetCDF format (.dem)
3492
3493    Example:
3494
3495    ncols         3121
3496    nrows         1800
3497    xllcorner     722000
3498    yllcorner     5893000
3499    cellsize      25
3500    NODATA_value  -9999
3501    138.3698 137.4194 136.5062 135.5558 ..........
3502
3503    Decimate data to cellsize_new using stencil and write to NetCDF dem format.
3504    """
3505
3506    import os
3507    from Scientific.IO.NetCDF import NetCDFFile
3508    from Numeric import Float, zeros, sum, reshape, equal
3509
3510    root = basename_in
3511    inname = root + '.dem'
3512
3513    #Open existing netcdf file to read
3514    infile = NetCDFFile(inname, 'r')
3515    if verbose: print 'Reading DEM from %s' %inname
3516
3517    #Read metadata
3518    ncols = infile.ncols[0]
3519    nrows = infile.nrows[0]
3520    xllcorner = infile.xllcorner[0]
3521    yllcorner = infile.yllcorner[0]
3522    cellsize = infile.cellsize[0]
3523    NODATA_value = infile.NODATA_value[0]
3524    zone = infile.zone[0]
3525    false_easting = infile.false_easting[0]
3526    false_northing = infile.false_northing[0]
3527    projection = infile.projection
3528    datum = infile.datum
3529    units = infile.units
3530
3531    dem_elevation = infile.variables['elevation']
3532
3533    #Get output file name
3534    if basename_out is None:
3535        outname = root + '_' + repr(cellsize_new) + '.dem'
3536    else:
3537        outname = basename_out + '.dem'
3538
3539    if verbose: print 'Write decimated NetCDF file to %s' %outname
3540
3541    #Determine some dimensions for decimated grid
3542    (nrows_stencil, ncols_stencil) = stencil.shape
3543    x_offset = ncols_stencil / 2
3544    y_offset = nrows_stencil / 2
3545    cellsize_ratio = int(cellsize_new / cellsize)
3546    ncols_new = 1 + (ncols - ncols_stencil) / cellsize_ratio
3547    nrows_new = 1 + (nrows - nrows_stencil) / cellsize_ratio
3548
3549    #Open netcdf file for output
3550    outfile = NetCDFFile(outname, 'w')
3551
3552    #Create new file
3553    outfile.institution = 'Geoscience Australia'
3554    outfile.description = 'NetCDF DEM format for compact and portable storage ' +\
3555                           'of spatial point data'
3556    #Georeferencing
3557    outfile.zone = zone
3558    outfile.projection = projection
3559    outfile.datum = datum
3560    outfile.units = units
3561
3562    outfile.cellsize = cellsize_new
3563    outfile.NODATA_value = NODATA_value
3564    outfile.false_easting = false_easting
3565    outfile.false_northing = false_northing
3566
3567    outfile.xllcorner = xllcorner + (x_offset * cellsize)
3568    outfile.yllcorner = yllcorner + (y_offset * cellsize)
3569    outfile.ncols = ncols_new
3570    outfile.nrows = nrows_new
3571
3572    # dimension definition
3573    outfile.createDimension('number_of_points', nrows_new*ncols_new)
3574
3575    # variable definition
3576    outfile.createVariable('elevation', Float, ('number_of_points',))
3577
3578    # Get handle to the variable
3579    elevation = outfile.variables['elevation']
3580
3581    dem_elevation_r = reshape(dem_elevation, (nrows, ncols))
3582
3583    #Store data
3584    global_index = 0
3585    for i in range(nrows_new):
3586        if verbose: print 'Processing row %d of %d' %(i, nrows_new)
3587        lower_index = global_index
3588        telev =  zeros(ncols_new, Float)
3589        local_index = 0
3590        trow = i * cellsize_ratio
3591
3592        for j in range(ncols_new):
3593            tcol = j * cellsize_ratio
3594            tmp = dem_elevation_r[trow:trow+nrows_stencil, tcol:tcol+ncols_stencil]
3595
3596            #if dem contains 1 or more NODATA_values set value in
3597            #decimated dem to NODATA_value, else compute decimated
3598            #value using stencil
3599            if sum(sum(equal(tmp, NODATA_value))) > 0:
3600                telev[local_index] = NODATA_value
3601            else:
3602                telev[local_index] = sum(sum(tmp * stencil))
3603
3604            global_index += 1
3605            local_index += 1
3606
3607        upper_index = global_index
3608
3609        elevation[lower_index:upper_index] = telev
3610
3611    assert global_index == nrows_new*ncols_new, 'index not equal to number of points'
3612
3613    infile.close()
3614    outfile.close()
3615
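# Example: decimate a 25m DEM to 75m cells with a 3x3 averaging stencil
# (a sketch; 'topo' is a hypothetical basename for topo.dem):
#
#     from Numeric import ones, Float
#     stencil = ones((3, 3), Float) / 9.0    # simple block average
#     decimate_dem('topo', stencil, 75, verbose=True)
#     # writes topo_75.dem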
3616
3617
3618
3619def tsh2sww(filename, verbose=False): 
3620    """
3621    Convert a tsh/msh mesh file to sww, to check if the mesh 'looks' good.
3622    """
3623
3624
3625    if verbose: print 'Creating domain from', filename
3626    domain = pmesh_to_domain_instance(filename, Domain)
3627    if verbose: print "Number of triangles = ", len(domain)
3628
3629    domain.smooth = True
3630    domain.format = 'sww'   #Native netcdf visualisation format
3631    file_path, filename = path.split(filename)
3632    filename, ext = path.splitext(filename)
3633    domain.set_name(filename)   
3634    domain.reduction = mean
3635    if verbose: print "file_path", file_path
3636    if file_path == "": file_path = "."
3637    domain.set_datadir(file_path)
3638
3639    if verbose:
3640        print "Output written to " + domain.get_datadir() + sep + \
3641              domain.get_name() + "." + domain.format
3642    sww = get_dataobject(domain)
3643    sww.store_connectivity()
3644    sww.store_timestep()
3645
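# Example use of tsh2sww (a sketch; 'mesh.tsh' is a hypothetical file):
#
#     tsh2sww('mesh.tsh', verbose=True)    # writes mesh.sww in the same dir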
3646
3647def asc_csiro2sww(bath_dir,
3648                  elevation_dir,
3649                  ucur_dir,
3650                  vcur_dir,
3651                  sww_file,
3652                  minlat = None, maxlat = None,
3653                  minlon = None, maxlon = None,
3654                  zscale=1,
3655                  mean_stage = 0,
3656                  fail_on_NaN = True,
3657                  elevation_NaN_filler = 0,
3658                  bath_prefix='ba',
3659                  elevation_prefix='el',
3660                  verbose=False):
3661    """
3662    Produce an sww boundary file from esri ascii data from CSIRO.
3663
3664    Also convert latitude and longitude to UTM. All coordinates are
3665    assumed to be given in the GDA94 datum.
3666
3667    assume:
3668    All files are in esri ascii format
3669
3670    4 types of information
3671    bathymetry
3672    elevation
3673    u velocity
3674    v velocity
3675
3676    Assumptions
3677    The metadata of all the files is the same
3678    Each type is in a separate directory
3679    One bath file with extension .000
3680    The time period is less than 24hrs and uniform.
3681    """
3682    from Scientific.IO.NetCDF import NetCDFFile
3683
3684    from anuga.coordinate_transforms.redfearn import redfearn
3685
3686    precision = Float # So if we want to change the precision it's done here
3687
3688    # go into the bath dir and load the only file,
3689    bath_files = os.listdir(bath_dir)
3690
3691    bath_file = bath_files[0]
3692    bath_dir_file =  bath_dir + os.sep + bath_file
3693    bath_metadata,bath_grid =  _read_asc(bath_dir_file)
3694
3695    #Use the date.time of the bath file as a basis for
3696    #the start time for other files
3697    base_start = bath_file[-12:]
3698
3699    #go into the elevation dir and load the 000 file
3700    elevation_dir_file = elevation_dir  + os.sep + elevation_prefix \
3701                         + base_start
3702
3703    elevation_files = os.listdir(elevation_dir)
3704    ucur_files = os.listdir(ucur_dir)
3705    vcur_files = os.listdir(vcur_dir)
3706    elevation_files.sort()
3707    # the first elevation file should be the
3708    # file with the same base name as the bath data
3709    assert elevation_files[0] == 'el' + base_start
3710
3711    number_of_latitudes = bath_grid.shape[0]
3712    number_of_longitudes = bath_grid.shape[1]
3713    number_of_volumes = (number_of_latitudes-1)*(number_of_longitudes-1)*2
3714
3715    longitudes = [bath_metadata['xllcorner']+x*bath_metadata['cellsize'] \
3716                  for x in range(number_of_longitudes)]
3717    latitudes = [bath_metadata['yllcorner']+y*bath_metadata['cellsize'] \
3718                 for y in range(number_of_latitudes)]
3719
3720    # reverse order of lat, so the first lat represents the first grid row
3721    latitudes.reverse()
3722
3723    kmin, kmax, lmin, lmax = _get_min_max_indexes(latitudes[:],longitudes[:],
3724                                                 minlat=minlat, maxlat=maxlat,
3725                                                 minlon=minlon, maxlon=maxlon)
3726
3727
3728    bath_grid = bath_grid[kmin:kmax,lmin:lmax]
3729    latitudes = latitudes[kmin:kmax]
3730    longitudes = longitudes[lmin:lmax]
3731    number_of_latitudes = len(latitudes)
3732    number_of_longitudes = len(longitudes)
3733    number_of_times = len(os.listdir(elevation_dir))
3734    number_of_points = number_of_latitudes*number_of_longitudes
3735    number_of_volumes = (number_of_latitudes-1)*(number_of_longitudes-1)*2
3736
3737    #Work out the times
3738    if len(elevation_files) > 1:
3739        # Assume: The time period is less than 24hrs.
3740        time_period = (int(elevation_files[1][-3:]) - \
3741                      int(elevation_files[0][-3:]))*60*60
3742        times = [x*time_period for x in range(len(elevation_files))]
3743    else:
3744        times = [0.0]
3745
3746
3747    if verbose:
3748        print '------------------------------------------------'
3749        print 'Statistics:'
3750        print '  Extent (lat/lon):'
3751        print '    lat in [%f, %f], len(lat) == %d'\
3752              %(min(latitudes), max(latitudes),
3753                len(latitudes))
3754        print '    lon in [%f, %f], len(lon) == %d'\
3755              %(min(longitudes), max(longitudes),
3756                len(longitudes))
3757        print '    t in [%f, %f], len(t) == %d'\
3758              %(min(times), max(times), len(times))
3759
3760    ######### WRITE THE SWW FILE #############
3761    # NetCDF file definition
3762    outfile = NetCDFFile(sww_file, 'w')
3763
3764    #Create new file
3765    outfile.institution = 'Geoscience Australia'
3766    outfile.description = 'Converted from XXX'
3767
3768
3769    #For sww compatibility
3770    outfile.smoothing = 'Yes'
3771    outfile.order = 1
3772
3773    #Start time in seconds since the epoch (midnight 1/1/1970)
3774    outfile.starttime = starttime = times[0]
3775
3776
3777    # dimension definitions
3778    outfile.createDimension('number_of_volumes', number_of_volumes)
3779
3780    outfile.createDimension('number_of_vertices', 3)
3781    outfile.createDimension('number_of_points', number_of_points)
3782    outfile.createDimension('number_of_timesteps', number_of_times)
3783
3784    # variable definitions
3785    outfile.createVariable('x', precision, ('number_of_points',))
3786    outfile.createVariable('y', precision, ('number_of_points',))
3787    outfile.createVariable('elevation', precision, ('number_of_points',))
3788
3789    #FIXME: Backwards compatibility
3790    outfile.createVariable('z', precision, ('number_of_points',))
3791    #################################
3792
3793    outfile.createVariable('volumes', Int, ('number_of_volumes',
3794                                            'number_of_vertices'))
3795
3796    outfile.createVariable('time', precision,
3797                           ('number_of_timesteps',))
3798
3799    outfile.createVariable('stage', precision,
3800                           ('number_of_timesteps',
3801                            'number_of_points'))
3802
3803    outfile.createVariable('xmomentum', precision,
3804                           ('number_of_timesteps',
3805                            'number_of_points'))
3806
3807    outfile.createVariable('ymomentum', precision,
3808                           ('number_of_timesteps',
3809                            'number_of_points'))
3810
3811    #Store
3812    from anuga.coordinate_transforms.redfearn import redfearn
3813    x = zeros(number_of_points, Float)  #Easting
3814    y = zeros(number_of_points, Float)  #Northing
3815
3816    if verbose: print 'Making triangular grid'
3817    #Get zone of 1st point.
3818    refzone, _, _ = redfearn(latitudes[0],longitudes[0])
3819
3820    vertices = {}
3821    i = 0
3822    for k, lat in enumerate(latitudes):
3823        for l, lon in enumerate(longitudes):
3824
3825            vertices[l,k] = i
3826
3827            zone, easting, northing = redfearn(lat,lon)
3828
3829            msg = 'Zone boundary crossed at longitude = %f' %lon
3830            #assert zone == refzone, msg
3831            #print '%7.2f %7.2f %8.2f %8.2f' %(lon, lat, easting, northing)
3832            x[i] = easting
3833            y[i] = northing
3834            i += 1
3835
3836
3837    #Construct 2 triangles per 'rectangular' element
3838    volumes = []
3839    for l in range(number_of_longitudes-1):    #X direction
3840        for k in range(number_of_latitudes-1): #Y direction
3841            v1 = vertices[l,k+1]
3842            v2 = vertices[l,k]
3843            v3 = vertices[l+1,k+1]
3844            v4 = vertices[l+1,k]
3845
3846            #Note, this is different to the ferret2sww code
3847            #since the order of the lats is reversed.
3848            volumes.append([v1,v3,v2]) #Upper element
3849            volumes.append([v4,v2,v3]) #Lower element
3850
3851    volumes = array(volumes)
3852
3853    geo_ref = Geo_reference(refzone,min(x),min(y))
3854    geo_ref.write_NetCDF(outfile)
3855
3856    # This will put the geo ref in the middle
3857    #geo_ref = Geo_reference(refzone, (max(x)+min(x))/2.0, (max(y)+min(y))/2.0)
3858
3859
3860    if verbose:
3861        print '------------------------------------------------'
3862        print 'More Statistics:'
3863        print '  Extent (easting/northing):'
3864        print '    x in [%f, %f], len(x) == %d'\
3865              %(min(x), max(x),
3866                len(x))
3867        print '    y in [%f, %f], len(y) == %d'\
3868              %(min(y), max(y),
3869                len(y))
3870        print 'geo_ref: ',geo_ref
3871
3872    z = resize(bath_grid,outfile.variables['z'][:].shape)
3873    outfile.variables['x'][:] = x - geo_ref.get_xllcorner()
3874    outfile.variables['y'][:] = y - geo_ref.get_yllcorner()
3875    outfile.variables['z'][:] = z
3876    outfile.variables['elevation'][:] = z  #FIXME HACK
3877    outfile.variables['volumes'][:] = volumes.astype(Int32) #On Opteron 64
3878
3879    stage = outfile.variables['stage']
3880    xmomentum = outfile.variables['xmomentum']
3881    ymomentum = outfile.variables['ymomentum']
3882
3883    outfile.variables['time'][:] = times   #Store time relative
3884
3885    if verbose: print 'Converting quantities'
3886    n = number_of_times
3887    for j in range(number_of_times):
3888        # load in files
3889        elevation_meta, elevation_grid = \
3890           _read_asc(elevation_dir + os.sep + elevation_files[j])
3891
3892        _, u_momentum_grid =  _read_asc(ucur_dir + os.sep + ucur_files[j])
3893        _, v_momentum_grid =  _read_asc(vcur_dir + os.sep + vcur_files[j])
3894
3895        #cut matrix to desired size
3896        elevation_grid = elevation_grid[kmin:kmax,lmin:lmax]
3897        u_momentum_grid = u_momentum_grid[kmin:kmax,lmin:lmax]
3898        v_momentum_grid = v_momentum_grid[kmin:kmax,lmin:lmax]
3899       
3900        # handle missing values
3901        missing = (elevation_grid == elevation_meta['NODATA_value'])
3902        if sometrue (missing):
3903            if fail_on_NaN:
3904                msg = 'File %s contains missing values'\
3905                      %(elevation_files[j])
3906                raise DataMissingValuesError, msg
3907            else:
3908                elevation_grid = elevation_grid*(missing==0) + \
3909                                 missing*elevation_NaN_filler
3910
3911
3912        if verbose and j%((n+10)/10)==0: print '  Doing %d of %d' %(j, n)
3913        i = 0
3914        for k in range(number_of_latitudes):      #Y direction
3915            for l in range(number_of_longitudes): #X direction
3916                w = zscale*elevation_grid[k,l] + mean_stage
3917                stage[j,i] = w
3918                h = w - z[i]
3919                xmomentum[j,i] = u_momentum_grid[k,l]*h
3920                ymomentum[j,i] = v_momentum_grid[k,l]*h
3921                i += 1
3922    outfile.close()
3923
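# Example call of asc_csiro2sww (a sketch; all directory and file names
# are hypothetical):
#
#     asc_csiro2sww('bathys', 'elevations', 'ucur', 'vcur',
#                   'boundary.sww',
#                   minlat=-34.4, maxlat=-34.2,
#                   minlon=150.8, maxlon=151.0,
#                   verbose=True)
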
3924def _get_min_max_indexes(latitudes_ref,longitudes_ref,
3925                        minlat=None, maxlat=None,
3926                        minlon=None, maxlon=None):
3927    """
3928    return max, min indexes (for slicing) of the lat and long arrays to cover the area
3929    specified with min/max lat/long
3930
3931    Think of the latitudes and longitudes describing a 2d surface.
3932    The area returned is, if possible, just big enough to cover the
3933    input max/min area. (This will not be possible if the max/min area
3934    has a section outside of the latitudes/longitudes area.)
3935
3936    assert longitudes are sorted,
3937    long - from low to high (west to east, eg 148 - 151)
3938    assert latitudes are sorted, ascending or descending
3939    """
3940    latitudes = latitudes_ref[:]
3941    longitudes = longitudes_ref[:]
3942
3943    latitudes = ensure_numeric(latitudes)
3944    longitudes = ensure_numeric(longitudes)
3945
3946    assert allclose(sort(longitudes), longitudes)
3947
3948    #print latitudes[0],longitudes[0]
3949    #print len(latitudes),len(longitudes)
3950    #print latitudes[len(latitudes)-1],longitudes[len(longitudes)-1]
3951   
3952    lat_ascending = True
3953    if not allclose(sort(latitudes), latitudes):
3954        lat_ascending = False
3955        # reverse order of lat, so it's in ascending order         
3956        latitudes = latitudes[::-1]
3957        assert allclose(sort(latitudes), latitudes)
3958    #print "latitudes  in funct", latitudes
3959   
3960    largest_lat_index = len(latitudes)-1
3961    #Cut out a smaller extent.
3962    if minlat is None:
3963        lat_min_index = 0
3964    else:
3965        lat_min_index = searchsorted(latitudes, minlat)-1
3966        if lat_min_index < 0:
3967            lat_min_index = 0
3968
3969
3970    if maxlat is None:
3971        lat_max_index = largest_lat_index #len(latitudes)
3972    else:
3973        lat_max_index = searchsorted(latitudes, maxlat)
3974        if lat_max_index > largest_lat_index:
3975            lat_max_index = largest_lat_index
3976
3977    if minlon is None:
3978        lon_min_index = 0
3979    else:
3980        lon_min_index = searchsorted(longitudes, minlon)-1
3981        if lon_min_index < 0:
3982            lon_min_index = 0
3983
3984    if maxlon is None:
3985        lon_max_index = len(longitudes)
3986    else:
3987        lon_max_index = searchsorted(longitudes, maxlon)
3988
3989    # Reversing the indexes, if the lat array is descending
3990    if lat_ascending is False:
3991        lat_min_index, lat_max_index = largest_lat_index - lat_max_index , \
3992                                       largest_lat_index - lat_min_index
3993    lat_max_index = lat_max_index + 1 # taking into account how slicing works
3994    lon_max_index = lon_max_index + 1 # taking into account how slicing works
3995
3996    return lat_min_index, lat_max_index, lon_min_index, lon_max_index
3997
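# Worked example: the returned indexes are intended for slicing, so the
# max indexes are one past the last row/column to keep:
#
#     lats = [-34.0, -34.5, -35.0]    # descending, as in the CSIRO grids
#     lons = [150.0, 150.5, 151.0]
#     kmin, kmax, lmin, lmax = _get_min_max_indexes(lats, lons,
#                                                   minlat=-34.6, maxlat=-34.2,
#                                                   minlon=150.2, maxlon=150.8)
#     # grid[kmin:kmax, lmin:lmax] then covers the requested area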
3998
3999def _read_asc(filename, verbose=False):
4000    """Read esri file from the following ASCII format (.asc)
4001
4002    Example:
4003
4004    ncols         3121
4005    nrows         1800
4006    xllcorner     722000
4007    yllcorner     5893000
4008    cellsize      25
4009    NODATA_value  -9999
4010    138.3698 137.4194 136.5062 135.5558 ..........
4011
4012    """
4013
4014    datafile = open(filename)
4015
4016    if verbose: print 'Reading DEM from %s' %(filename)
4017    lines = datafile.readlines()
4018    datafile.close()
4019
4020    if verbose: print 'Got', len(lines), ' lines'
4021
4022    ncols = int(lines.pop(0).split()[1].strip())
4023    nrows = int(lines.pop(0).split()[1].strip())
4024    xllcorner = float(lines.pop(0).split()[1].strip())
4025    yllcorner = float(lines.pop(0).split()[1].strip())
4026    cellsize = float(lines.pop(0).split()[1].strip())
4027    NODATA_value = float(lines.pop(0).split()[1].strip())
4028
4029    assert len(lines) == nrows
4030
4031    #Store data
4032    grid = []
4033
4034    n = len(lines)
4035    for i, line in enumerate(lines):
4036        cells = line.split()
4037        assert len(cells) == ncols
4038        grid.append(array([float(x) for x in cells]))
4039    grid = array(grid)
4040
4041    return {'xllcorner':xllcorner,
4042            'yllcorner':yllcorner,
4043            'cellsize':cellsize,
4044            'NODATA_value':NODATA_value}, grid
4045
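# Example use of _read_asc (a sketch; 'bathy.asc' is a hypothetical file):
#
#     metadata, grid = _read_asc('bathy.asc')
#     print metadata['cellsize'], grid.shape    # grid is (nrows, ncols)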
4046
4047
4048####  URS 2 SWW  ###
4049
4050lon_name = 'LON'
4051lat_name = 'LAT'
4052time_name = 'TIME'
4053precision = Float # So if we want to change the precision it's done here
4054class Write_nc:
4055    """
4056    Write an nc file.
4057   
4058    Note, this should be checked to meet cdc netcdf conventions for gridded
4059    data. http://www.cdc.noaa.gov/cdc/conventions/cdc_netcdf_standard.shtml
4060   
4061    """
4062    def __init__(self,
4063                 quantity_name,
4064                 file_name,
4065                 time_step_count,
4066                 time_step,
4067                 lon,
4068                 lat):
4069        """
4070        time_step_count is the number of time steps.
4071        time_step is the time step size
4072       
4073        pre-condition: quantity_name must be 'HA', 'UA' or 'VA'.
4074        """
4075        self.quantity_name = quantity_name
4076        quantity_units = {'HA':'CENTIMETERS',
4077                              'UA':'CENTIMETERS/SECOND',
4078                              'VA':'CENTIMETERS/SECOND'}       
4079       
4080        multiplier_dic = {'HA':100.0, # To convert from m to cm
4081                              'UA':100.0,  #  m/s to cm/sec
4082                              'VA':-100.0}  # MUX files have positive x in the
4083        # Southern direction.  This corrects for it, when writing nc files.
4084       
4085        self.quantity_multiplier =  multiplier_dic[self.quantity_name]
4086       
4087        #self.file_name = file_name
4088        self.time_step_count = time_step_count
4089        self.time_step = time_step
4090
4091        # NetCDF file definition
4092        self.outfile = NetCDFFile(file_name, 'w')
4093        outfile = self.outfile       
4094
4095        #Create new file
4096        nc_lon_lat_header(outfile, lon, lat)
4097   
4098        # TIME
4099        outfile.createDimension(time_name, None)
4100        outfile.createVariable(time_name, precision, (time_name,))
4101
4102        #QUANTITY
4103        outfile.createVariable(self.quantity_name, precision,
4104                               (time_name, lat_name, lon_name))
4105        outfile.variables[self.quantity_name].missing_value=-1.e+034
4106        outfile.variables[self.quantity_name].units= \
4107                                 quantity_units[self.quantity_name]
4108        outfile.variables[lon_name][:]= ensure_numeric(lon)
4109        outfile.variables[lat_name][:]= ensure_numeric(lat)
4110
4111        #Assume no one will want to read this while we are writing
4112        #outfile.close()
4113       
4114    def store_timestep(self, quantity_slice):
4115        """
4116        Write a time slice of quantity info
4117        quantity_slice is the data to be stored at this time step
4118        """
4119       
4120        outfile = self.outfile
4121       
4122        # Get the variables
4123        time = outfile.variables[time_name]
4124        quantity = outfile.variables[self.quantity_name]
4125           
4126        i = len(time)
4127
4128        #Store time
4129        time[i] = i*self.time_step #self.domain.time
4130        quantity[i,:] = quantity_slice* self.quantity_multiplier
4131       
4132    def close(self):
4133        self.outfile.close()
4134
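# Example use of Write_nc (a sketch; all values are hypothetical):
#
#     from Numeric import zeros, Float
#     lon = [150.0, 150.5]
#     lat = [-34.5, -34.0]
#     nc = Write_nc('HA', 'test_ha.nc', time_step_count=2, time_step=60.0,
#                   lon=lon, lat=lat)
#     for i in range(2):
#         nc.store_timestep(zeros((len(lat), len(lon)), Float))
#     nc.close()
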
4135def urs2sww(basename_in='o', basename_out=None, verbose=False,
4136            remove_nc_files=True,
4137            minlat=None, maxlat=None,
4138            minlon= None, maxlon=None,
4139            mint=None, maxt=None,
4140            mean_stage=0,
4141            origin = None,
4142            zscale=1,
4143            fail_on_NaN=True,
4144            NaN_filler=0,
4145            elevation=None):
4146    """
4147    Convert URS C binary format for wave propagation to
4148    sww format native to abstract_2d_finite_volumes.
4149
4150    Specify only basename_in and read files of the form
4151    basefilename-z-mux, basefilename-e-mux and
4152    basefilename-n-mux containing relative height,
4153    x-velocity and y-velocity, respectively.
4154
4155    Also convert latitude and longitude to UTM. All coordinates are
4156    assumed to be given in the GDA94 datum. The latitude and longitude
4157    information is for  a grid.
4158
4159    min's and max's: If omitted - full extent is used.
4160    To include a value min may equal it, while max must exceed it.
4161    Lat and lon are assumed to be in decimal degrees.
4162    NOTE: minlon is the easternmost boundary.
4163   
4164    origin is a 3-tuple with geo referenced
4165    UTM coordinates (zone, easting, northing)
4166    It will be the origin of the sww file. This shouldn't be used,
4167    since all of anuga should be able to handle an arbitrary origin.
4168
4169
4170    URS C binary format has data organised as TIME, LONGITUDE, LATITUDE
4171    which means that latitude is the fastest
4172    varying dimension (row major order, so to speak)
4173
4174    In URS C binary the latitudes and longitudes are in ascending order.
4175    """
4176    if basename_out is None:
4177        basename_out = basename_in
4178    files_out = urs2nc(basename_in, basename_out)
4179    ferret2sww(basename_out,
4180               minlat=minlat,
4181               maxlat=maxlat,
4182               minlon=minlon,
4183               maxlon=maxlon,
4184               mint=mint,
4185               maxt=maxt,
4186               mean_stage=mean_stage,
4187               origin=origin,
4188               zscale=zscale,
4189               fail_on_NaN=fail_on_NaN,
4190               NaN_filler=NaN_filler,
4191               inverted_bathymetry=True,
4192               verbose=verbose)
4193    #print "files_out",files_out
4194    if remove_nc_files:
4195        for file_out in files_out:
4196            os.remove(file_out)
4197   
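# Example call of urs2sww (a sketch; assumes o-z-mux, o-e-mux and
# o-n-mux exist in the current directory):
#
#     urs2sww(basename_in='o', basename_out='urs_boundary',
#             minlat=-35.0, maxlat=-34.0,
#             minlon=150.0, maxlon=151.0,
#             mean_stage=0.0, verbose=True)
#     # writes urs_boundary.sww via intermediate nc files
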
4198def urs2nc(basename_in = 'o', basename_out = 'urs'):
4199    """
4200    Convert the 3 urs files to 4 nc files.
4201
4202    The urs file names must be:
4203    [basename_in]-z-mux
4204    [basename_in]-e-mux
4205    [basename_in]-n-mux
4206   
4207    """
4208   
4209    files_in = [basename_in + WAVEHEIGHT_MUX_LABEL,
4210                basename_in + EAST_VELOCITY_LABEL,
4211                basename_in + NORTH_VELOCITY_LABEL]
4212    files_out = [basename_out+'_ha.nc',
4213                 basename_out+'_ua.nc',
4214                 basename_out+'_va.nc']
4215    quantities = ['HA','UA','VA']
4216
4217    #if os.access(files_in[0]+'.mux', os.F_OK) == 0 :
4218    for i, file_name in enumerate(files_in):
4219        if os.access(file_name, os.F_OK) == 0:
4220            if os.access(file_name+'.mux', os.F_OK) == 0 :
4221                msg = 'File %s does not exist or is not accessible' %file_name
4222                raise IOError, msg
4223            else:
4224                files_in[i] += '.mux'
4225                print "file_name", file_name
4226    hashed_elevation = None
4227    for file_in, file_out, quantity in map(None, files_in,
4228                                           files_out,
4229                                           quantities):
4230        lonlatdep, lon, lat, depth = _binary_c2nc(file_in,
4231                                         file_out,
4232                                         quantity)
4233        #print "lonlatdep", lonlatdep
4234        if hashed_elevation is None:
4235            elevation_file = basename_out+'_e.nc'
4236            write_elevation_nc(elevation_file,
4237                                lon,
4238                                lat,
4239                                depth)
4240            hashed_elevation = myhash(lonlatdep)
4241        else:
4242            msg = "The elevation information in the mux files is inconsistent"
4243            assert hashed_elevation == myhash(lonlatdep), msg
4244    files_out.append(elevation_file)
4245    return files_out
4246   
4247def _binary_c2nc(file_in, file_out, quantity):
4248    """
4249    Reads in a quantity urs file and writes a quantity nc file.
4250    Additionally, it returns the depth and lat, long info,
4251    so it can be written to a file.
4252    """
4253    columns = 3 # long, lat, depth
4254    mux_file = open(file_in, 'rb')
4255   
4256    # Number of points/stations
4257    (points_num,)= unpack('i',mux_file.read(4))
4258
4259    # nt, int - Number of time steps
4260    (time_step_count,)= unpack('i',mux_file.read(4))
4261
4262    #dt, float - time step, seconds
4263    (time_step,) = unpack('f', mux_file.read(4))
4264   
4265    msg = "Bad data in the mux file."
4266    if points_num < 0:
4267        mux_file.close()
4268        raise ANUGAError, msg
4269    if time_step_count < 0:
4270        mux_file.close()
4271        raise ANUGAError, msg
4272    if time_step < 0:
4273        mux_file.close()
4274        raise ANUGAError, msg
4275   
4276    lonlatdep = p_array.array('f')
4277    lonlatdep.read(mux_file, columns * points_num)
4278    lonlatdep = array(lonlatdep, typecode=Float)   
4279    lonlatdep = reshape(lonlatdep, (points_num, columns))
4280   
4281    lon, lat, depth = lon_lat2grid(lonlatdep)
4282    lon_sorted = list(lon)
4283    lon_sorted.sort()
4284
4285    if not lon == lon_sorted:
4286        msg = "Longitudes in mux file are not in ascending order"
4287        raise IOError, msg
4288    lat_sorted = list(lat)
4289    lat_sorted.sort()
4290
4291    if not lat == lat_sorted:
4292        msg = "Latitudes in mux file are not in ascending order"
4293        raise IOError, msg
4294    nc_file = Write_nc(quantity,
4295                       file_out,
4296                       time_step_count,
4297                       time_step,
4298                       lon,
4299                       lat)
4300
4301    for i in range(time_step_count):
4302        #Read in a time slice  from mux file 
4303        hz_p_array = p_array.array('f')
4304        hz_p_array.read(mux_file, points_num)
4305        hz_p = array(hz_p_array, typecode=Float)
4306        hz_p = reshape(hz_p, (len(lon), len(lat)))
4307        hz_p = transpose(hz_p) #mux has lat varying fastest, nc has long v.f.
4308
4309        #write time slice to nc file
4310        nc_file.store_timestep(hz_p)
4311    mux_file.close()
4312    nc_file.close()
4313
4314    return lonlatdep, lon, lat, depth
4315   
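# Layout of the mux header read above, for reference (a sketch):
#
#     int32   points_num         number of points/stations
#     int32   time_step_count    number of time steps
#     float32 time_step          time step size, seconds
#     float32 x 3*points_num     lon, lat, depth for each point
#     float32 x points_num       one slice per time step follows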
4316
4317def write_elevation_nc(file_out, lon, lat, depth_vector):
4318    """
4319    Write an nc elevation file.
4320    """
4321   
4322    # NetCDF file definition
4323    outfile = NetCDFFile(file_out, 'w')
4324
4325    #Create new file
4326    nc_lon_lat_header(outfile, lon, lat)
4327   
4328    # ELEVATION
4329    zname = 'ELEVATION'
4330    outfile.createVariable(zname, precision, (lat_name, lon_name))
4331    outfile.variables[zname].units='CENTIMETERS'
4332    outfile.variables[zname].missing_value=-1.e+034
4333
4334    outfile.variables[lon_name][:]= ensure_numeric(lon)
4335    outfile.variables[lat_name][:]= ensure_numeric(lat)
4336
4337    depth = reshape(depth_vector, ( len(lat), len(lon)))
4338    outfile.variables[zname][:]= depth
4339   
4340    outfile.close()
4341   
4342def nc_lon_lat_header(outfile, lon, lat):
4343    """
4344    outfile is the netcdf file handle.
4345    lon - a list/array of the longitudes
4346    lat - a list/array of the latitudes
4347    """
4348   
4349    outfile.institution = 'Geoscience Australia'
4350    outfile.description = 'Converted from URS binary C'
4351   
4352    # Longitude
4353    outfile.createDimension(lon_name, len(lon))
4354    outfile.createVariable(lon_name, precision, (lon_name,))
4355    outfile.variables[lon_name].point_spacing='uneven'
4356    outfile.variables[lon_name].units='degrees_east'
4357    outfile.variables[lon_name].assignValue(lon)
4358
4359
4360    # Latitude
4361    outfile.createDimension(lat_name, len(lat))
4362    outfile.createVariable(lat_name, precision, (lat_name,))
4363    outfile.variables[lat_name].point_spacing='uneven'
4364    outfile.variables[lat_name].units='degrees_north'
4365    outfile.variables[lat_name].assignValue(lat)
4366
4367
4368   
4369def lon_lat2grid(long_lat_dep):
4370    """
4371    given a list of points that are assumed to be on a grid,
4372    return the long's and lat's of the grid.
4373    long_lat_dep is an array where each row is a position.
4374    The first column is longitudes.
4375    The second column is latitudes.
4376
4377    The latitude is the fastest varying dimension - in mux files
4378    """
4379    LONG = 0
4380    LAT = 1
4381    QUANTITY = 2
4382
4383    long_lat_dep = ensure_numeric(long_lat_dep, Float)
4384   
4385    num_points = long_lat_dep.shape[0]
4386    this_rows_long = long_lat_dep[0,LONG]
4387
4388    # Count the length of unique latitudes
4389    i = 0
4390    while i < num_points and long_lat_dep[i,LONG] == this_rows_long:
4391        i += 1
4392    # determine the lats and longs from the grid
4393    lat = long_lat_dep[:i, LAT]       
4394    long = long_lat_dep[::i, LONG]
4395   
4396    lenlong = len(long)
4397    lenlat = len(lat)
4398    #print 'len lat', lat, len(lat)
4399    #print 'len long', long, len(long)
4400         
4401    msg = 'Input data is not gridded'     
4402    assert num_points % lenlat == 0, msg
4403    assert num_points % lenlong == 0, msg
4404         
4405    # Test that data is gridded       
4406    for i in range(lenlong):
4407        msg = 'Data is not gridded.  It must be for this operation'
4408        first = i*lenlat
4409        last = first + lenlat
4410               
4411        assert allclose(long_lat_dep[first:last,LAT], lat), msg
4412        assert allclose(long_lat_dep[first:last,LONG], long[i]), msg
4413   
4414   
4415#    print 'range long', min(long), max(long)
4416#    print 'range lat', min(lat), max(lat)
4417#    print 'ref long', min(long_lat_dep[:,0]), max(long_lat_dep[:,0])
4418#    print 'ref lat', min(long_lat_dep[:,1]), max(long_lat_dep[:,1])
4419   
4420   
4421   
4422    msg = 'Out of range latitudes/longitudes'
4423    for l in lat: assert -90 < l < 90, msg
4424    for l in long: assert -180 < l < 180, msg
4425
4426    # Changing quantity from lat being the fastest varying dimension to
4427    # long being the fastest varying dimension
4428    # FIXME - make this faster/do this a better way
4429    # use numeric transpose, after reshaping the quantity vector
4430#    quantity = zeros(len(long_lat_dep), Float)
4431    quantity = zeros(num_points, Float)
4432   
4433#    print 'num',num_points
4434    for lat_i, _ in enumerate(lat):
4435        for long_i, _ in enumerate(long):
4436            q_index = lat_i*lenlong+long_i
4437            lld_index = long_i*lenlat+lat_i
4438#            print 'lat_i', lat_i, 'long_i',long_i, 'q_index', q_index, 'lld_index', lld_index
4439            temp = long_lat_dep[lld_index, QUANTITY]
4440            quantity[q_index] = temp
4441           
4442    return long, lat, quantity
4443
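# Worked example for lon_lat2grid: four points on a 2x2 grid with
# latitude varying fastest, as in mux files:
#
#     from Numeric import array, Float
#     lld = array([[150.0, -34.5, 1.0],
#                  [150.0, -34.0, 2.0],
#                  [151.0, -34.5, 3.0],
#                  [151.0, -34.0, 4.0]], Float)
#     lon, lat, quantity = lon_lat2grid(lld)
#     # lon is [150.0, 151.0], lat is [-34.5, -34.0] and
#     # quantity is [1.0, 3.0, 2.0, 4.0] (longitude now varies fastest)
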
4444####  END URS 2 SWW  ###
4445
4446#### URS UNGRIDDED 2 SWW ###
4447
4448### PRODUCING THE POINTS NEEDED FILE ###
4449
4450# Ones used for FESA 2007 results
4451#LL_LAT = -50.0
4452#LL_LONG = 80.0
4453#GRID_SPACING = 1.0/60.0
4454#LAT_AMOUNT = 4800
4455#LONG_AMOUNT = 3600
4456
4457def URS_points_needed_to_file(file_name, boundary_polygon, zone,
4458                              ll_lat, ll_long,
4459                              grid_spacing, 
4460                              lat_amount, long_amount,
4461                              isSouthernHemisphere=True,
4462                              export_csv=False, use_cache=False,
4463                              verbose=False):
4464    """
4465    Given the info to replicate the URS grid and a polygon output
4466    a file that specifies the cloud of boundary points for URS.
4467
4468    This creates a .urs file.  This is in the format used by URS;
4469    1st line is the number of points,
4470    each line after represents a point, in lats and longs.
4471   
4472    Note: The polygon cannot cross zones or hemispheres.
4473
4474    A workaround for different zones or hemispheres is to run this twice,
4475    once for each zone, and then combine the output.
4476   
4477    file_name - name of the urs file produced for David.
4478    boundary_polygon - a list of points that describes a polygon.
4479                      The last point is assumed to join the first point.
4480                      This is in UTM (lat long would be better though)
4481
4482     This is info about the URS model that needs to be inputted.
4483     
4484    ll_lat - lower left latitude, in decimal degrees
4485    ll_long - lower left longitude, in decimal degrees
4486    grid_spacing - in decimal degrees
4487    lat_amount - number of latitudes
4488    long_amount - number of longitudes
4489
4490
4491    Don't add the file extension.  It will be added.
4492    """
4493    geo = URS_points_needed(boundary_polygon, zone, ll_lat, ll_long,
4494                            grid_spacing, 
4495                            lat_amount, long_amount, isSouthernHemisphere,
4496                            use_cache, verbose)
4497    if not file_name[-4:] == ".urs":
4498        file_name += ".urs"
4499    geo.export_points_file(file_name, isSouthHemisphere=isSouthernHemisphere)
4500    if export_csv:
4501        if file_name[-4:] == ".urs":
4502            file_name = file_name[:-4] + ".csv"
4503        geo.export_points_file(file_name)
4504    return geo
4505
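# Example call (a sketch; the polygon and the URS grid values are
# hypothetical, loosely following the FESA 2007 constants above):
#
#     polygon = [[480000., 6000000.], [520000., 6000000.],
#                [520000., 6040000.], [480000., 6040000.]]
#     geo = URS_points_needed_to_file('urs_boundary', polygon, zone=50,
#                                     ll_lat=-50.0, ll_long=80.0,
#                                     grid_spacing=1.0/60.0,
#                                     lat_amount=4800, long_amount=3600,
#                                     verbose=True)
#     # writes urs_boundary.urs
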
4506def URS_points_needed(boundary_polygon, zone, ll_lat,
4507                      ll_long, grid_spacing, 
4508                      lat_amount, long_amount, isSouthHemisphere=True,
4509                      use_cache=False, verbose=False):
4510    args = (boundary_polygon,
4511            zone, ll_lat,
4512            ll_long, grid_spacing, 
4513            lat_amount, long_amount, isSouthHemisphere)
4514    kwargs = {} 
4515    if use_cache is True:
4516        try:
4517            from anuga.caching import cache
4518        except:
4519            msg = 'Caching was requested, but caching module '+\
4520                  'could not be imported'
4521            raise ImportError, msg
4522
4523
4524        geo = cache(_URS_points_needed,
4525                  args, kwargs,
4526                  verbose=verbose,
4527                  compression=False)
4528    else:
4529        geo = apply(_URS_points_needed, args, kwargs)
4530
4531    return geo
4532
4533def _URS_points_needed(boundary_polygon,
4534                      zone, ll_lat,
4535                      ll_long, grid_spacing, 
4536                      lat_amount, long_amount,
4537                       isSouthHemisphere):
4538    """
4539    boundary_polygon - a list of points that describes a polygon.
4540                      The last point is assumed to join the first point.
4541                      This is in UTM (lat long would be better though)
4542
4543    ll_lat - lower left latitude, in decimal degrees
4544    ll_long - lower left longitude, in decimal degrees
4545    grid_spacing - in decimal degrees
4546
4547    """
4548   
4549    from sets import ImmutableSet
4550   
4551    msg = "grid_spacing can not be zero"
4552    assert not grid_spacing == 0, msg
4553    a = boundary_polygon
4554    # List of segments.  Each segment is two points.
4555    segs = [i and [a[i-1], a[i]] or [a[len(a)-1], a[0]] for i in range(len(a))]
4556    # convert the segs to Lat's and longs.
4557   
4558    # Don't assume the zone of the segments is the same as the lower left
4559    # corner of the lat long data!!  They can easily be in different zones
4560   
4561    lat_long_set = ImmutableSet()
4562    for seg in segs:
4563        points_lat_long = points_needed(seg, ll_lat, ll_long, grid_spacing, 
4564                      lat_amount, long_amount, zone, isSouthHemisphere)
4565        lat_long_set |= ImmutableSet(points_lat_long)
4566    if lat_long_set == ImmutableSet([]):
4567        msg = """URS region specified and polygon does not overlap."""
4568        raise ValueError, msg
4569
4570    # Warning there is no info in geospatial saying the hemisphere of
4571    # these points.  There should be.
4572    geo = Geospatial_data(data_points=list(lat_long_set),
4573                              points_are_lats_longs=True)
4574    return geo
4575   
4576def points_needed(seg, ll_lat, ll_long, grid_spacing, 
4577                  lat_amount, long_amount, zone,
4578                  isSouthHemisphere):
4579    """
4580    seg is two points, in UTM
4581    return a list of the points, in lats and longs that are needed to
4582    interpolate any point on the segment.
4583    """
4584    from math import sqrt
4585    #print "zone",zone
4586    geo_reference = Geo_reference(zone=zone)
4587    #print "seg", seg
4588    geo = Geospatial_data(seg,geo_reference=geo_reference)
4589    seg_lat_long = geo.get_data_points(as_lat_long=True,
4590                                       isSouthHemisphere=isSouthHemisphere)
4591    #print "seg_lat_long", seg_lat_long
4592    # 1.415 = 2^0.5, rounded up....
4593    sqrt_2_rounded_up = 1.415
4594    buffer = sqrt_2_rounded_up * grid_spacing
4595   
4596    max_lat = max(seg_lat_long[0][0], seg_lat_long[1][0]) + buffer
4597    max_long = max(seg_lat_long[0][1], seg_lat_long[1][1]) + buffer
4598    min_lat = min(seg_lat_long[0][0], seg_lat_long[1][0]) - buffer
4599    min_long = min(seg_lat_long[0][1], seg_lat_long[1][1]) - buffer
4600
4601    #print "min_long", min_long
4602    #print "ll_long", ll_long
4603    #print "grid_spacing", grid_spacing
4604    first_row = (min_long - ll_long)/grid_spacing
4605    # To round up
4606    first_row_long = int(round(first_row + 0.5))
4607    #print "first_row", first_row_long
4608
4609    last_row = (max_long - ll_long)/grid_spacing # round down
4610    last_row_long = int(round(last_row))
4611    #print "last_row", last_row_long
4612   
4613    first_row = (min_lat - ll_lat)/grid_spacing
4614    # To round up
4615    first_row_lat = int(round(first_row + 0.5))
4616    #print "first_row", first_row_lat
4617
4618    last_row = (max_lat - ll_lat)/grid_spacing # round down
4619    last_row_lat = int(round(last_row))
4620    #print "last_row",last_row_lat
4621
4622    # to work out the max distance -
4623    # 111120 m - distance spanned by 1 degree of latitude.
4624    #max_distance = sqrt_2_rounded_up * 111120 * grid_spacing
4625    max_distance = 157147.4112 * grid_spacing
4626    #print "max_distance", max_distance #2619.12 m for 1 minute
4627    points_lat_long = []
4628    # Create a list of the lat long points to include.
4629    for index_lat in range(first_row_lat, last_row_lat + 1):
4630        for index_long in range(first_row_long, last_row_long + 1):
4631            #print "index_lat", index_lat
4632            #print "index_long", index_long
4633            lat = ll_lat + index_lat*grid_spacing
4634            long = ll_long + index_long*grid_spacing
4635
4636            #filter here to keep good points
4637            if keep_point(lat, long, seg, max_distance):
4638                points_lat_long.append((lat, long)) #must be hashable
4639    #print "points_lat_long", points_lat_long
4640
4641    # Now that we have these points, lets throw ones out that are too far away
4642    return points_lat_long
4643
4644def keep_point(lat, long, seg, max_distance):
4645    """
4646    seg is two points, UTM
4647    """
4648    from math import sqrt
4649    _ , x0, y0 = redfearn(lat, long)
4650    x1 = seg[0][0]
4651    y1 = seg[0][1]
4652    x2 = seg[1][0]
4653    y2 = seg[1][1]
4654    x2_1 = x2-x1
4655    y2_1 = y2-y1
4656    try:
4657        d = abs((x2_1)*(y1-y0)-(x1-x0)*(y2_1))/sqrt( \
4658            (x2_1)*(x2_1)+(y2_1)*(y2_1))
4659    except ZeroDivisionError:
4660        #print "seg", seg
4661        #print "x0", x0
4662        #print "y0", y0
4663        if sqrt((x2_1)*(x2_1)+(y2_1)*(y2_1)) == 0 and \
4664           abs((x2_1)*(y1-y0)-(x1-x0)*(y2_1)) == 0:
4665            return True
4666        else:
4667            return False
4668   
4669    if d <= max_distance:
4670        return True
4671    else:
4672        return False
4673   
4674#### CONVERTING UNGRIDDED URS DATA TO AN SWW FILE ####
4675   
4676WAVEHEIGHT_MUX_LABEL = '-z-mux'
4677EAST_VELOCITY_LABEL = '-e-mux'
4678NORTH_VELOCITY_LABEL = '-n-mux'
4679def urs_ungridded2sww(basename_in='o', basename_out=None, verbose=False,
4680                      mint=None, maxt=None,
4681                      mean_stage=0,
4682                      origin=None,
4683                      hole_points_UTM=None,
4684                      zscale=1):
4685    """   
4686    Convert URS C binary format for wave propagation to
4687    sww format native to abstract_2d_finite_volumes.
4688
4689
4690    Specify only basename_in and read files of the form
4691    basefilename-z-mux, basefilename-e-mux and
4692    basefilename-n-mux containing relative height,
4693    x-velocity and y-velocity, respectively.
4694
4695    Also convert latitude and longitude to UTM. All coordinates are
4696    assumed to be given in the GDA94 datum. The latitude and longitude
4697    information is assumed to be ungridded.
4698
4699    min's and max's: If omitted - full extent is used.
4700    To include a value min and max may equal it.
4701    Lat and lon are assumed to be in decimal degrees.
4702   
4703    origin is a 3-tuple with geo referenced
4704    UTM coordinates (zone, easting, northing)
4705    It will be the origin of the sww file. This shouldn't be used,
4706    since all of anuga should be able to handle an arbitrary origin.
4707    The mux point info is NOT relative to this origin.
4708
4709
4710    URS C binary format has data organised as TIME, LONGITUDE, LATITUDE
4711    which means that latitude is the fastest
4712    varying dimension (row major order, so to speak)
4713
4714    In URS C binary the latitudes and longitudes are in ascending order.
4715
4716    Note, interpolations of the resulting sww file will be different
4717    from results of urs2sww.  This is due to the interpolation
4718    function used, and the different grid structure between urs2sww
4719    and this function.
4720   
4721    Interpolating data that has an underlying gridded source can
4722    easily end up with different values, depending on the underlying
4723    mesh.
4724
    Consider these 4 points:
4726    50  -50
4727
4728    0     0
4729
4730    The grid can be
4731     -
4732    |\|    A
4733     -
4734     or;
4735      -
4736     |/|   B
4737      -
      If a point lies just below the midpoint of the square, it will have a
      +ve value in grid A and a -ve value in grid B.
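
    Example (sketch; assumes files o-z-mux, o-e-mux and o-n-mux exist
    in the current directory):

        urs_ungridded2sww(basename_in='o',
                          basename_out='boundary',
                          mint=0.0, maxt=3600.0,
                          mean_stage=0.0)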
4740    """ 
4741    from anuga.mesh_engine.mesh_engine import NoTrianglesError
4742    from anuga.pmesh.mesh import Mesh
4743
4744    files_in = [basename_in + WAVEHEIGHT_MUX_LABEL,
4745                basename_in + EAST_VELOCITY_LABEL,
4746                basename_in + NORTH_VELOCITY_LABEL]
4747    quantities = ['HA','UA','VA']
4748
    # Instantiate Urs_points for each of the three mux files.
4750    mux = {}
4751    for quantity, file in map(None, quantities, files_in):
4752        mux[quantity] = Urs_points(file)
4753       
4754    # Could check that the depth is the same. (hashing)
4755
4756    # handle to a mux file to do depth stuff
4757    a_mux = mux[quantities[0]]
4758   
4759    # Convert to utm
4760    lat = a_mux.lonlatdep[:,1]
4761    long = a_mux.lonlatdep[:,0]
4762    points_utm, zone = convert_from_latlon_to_utm( \
4763        latitudes=lat, longitudes=long)
4764    #print "points_utm", points_utm
4765    #print "zone", zone
4766
    elevation = a_mux.lonlatdep[:,2] * -1 # Convert depth to elevation
4768   
4769    # grid ( create a mesh from the selected points)
    # This mesh has a problem.  Triangles are stretched over ungridded areas.
    # If these areas could be described as holes in pmesh, that would be great.

    # I can't just get the user to select a point in the middle.
4774    # A boundary is needed around these points.
4775    # But if the zone of points is obvious enough auto-segment should do
4776    # a good boundary.
4777    mesh = Mesh()
4778    mesh.add_vertices(points_utm)
4779    mesh.auto_segment(smooth_indents=True, expand_pinch=True)
4780    # To try and avoid alpha shape 'hugging' too much
4781    mesh.auto_segment( mesh.shape.get_alpha()*1.1 )
4782    if hole_points_UTM is not None:
4783        point = ensure_absolute(hole_points_UTM)
4784        mesh.add_hole(point[0], point[1])
4785    try:
4786        mesh.generate_mesh(minimum_triangle_angle=0.0, verbose=False)
4787    except NoTrianglesError:
4788        # This is a bit of a hack, going in and changing the
4789        # data structure.
4790        mesh.holes = []
4791        mesh.generate_mesh(minimum_triangle_angle=0.0, verbose=False)
4792    mesh_dic = mesh.Mesh2MeshList()
4793
4794    #mesh.export_mesh_file(basename_in + '_168.tsh')
4795    #import sys; sys.exit()
4796    # These are the times of the mux file
4797    mux_times = []
4798    for i in range(a_mux.time_step_count):
4799        mux_times.append(a_mux.time_step * i) 
4800    mux_times_start_i, mux_times_fin_i = mux2sww_time(mux_times, mint, maxt)
4801    times = mux_times[mux_times_start_i:mux_times_fin_i]
4802   
4803    if mux_times_start_i == mux_times_fin_i:
4804        # Close the mux files
4805        for quantity, file in map(None, quantities, files_in):
4806            mux[quantity].close()
4807        msg="Due to mint and maxt there's no time info in the boundary SWW."
4808        raise Exception, msg
4809       
    # If this raise is removed there are currently no downstream errors
4811           
4812    points_utm=ensure_numeric(points_utm)
    assert allclose(ensure_numeric(mesh_dic['generatedpointlist']),
                    ensure_numeric(points_utm))
4815   
4816    volumes = mesh_dic['generatedtrianglelist']
4817   
4818    # write sww intro and grid stuff.   
4819    if basename_out is None:
4820        swwname = basename_in + '.sww'
4821    else:
4822        swwname = basename_out + '.sww'
4823
4824    if verbose: print 'Output to ', swwname
4825    outfile = NetCDFFile(swwname, 'w')
4826    # For a different way of doing this, check out tsh2sww
4827    # work out sww_times and the index range this covers
4828    sww = Write_sww()
4829    sww.store_header(outfile, times, len(volumes), len(points_utm),
4830                     verbose=verbose,sww_precision=Float)
4831    outfile.mean_stage = mean_stage
4832    outfile.zscale = zscale
4833
4834    sww.store_triangulation(outfile, points_utm, volumes,
4835                            elevation, zone,  new_origin=origin,
4836                            verbose=verbose)
4837   
4838    if verbose: print 'Converting quantities'
4839    j = 0
4840    # Read in a time slice from each mux file and write it to the sww file
4841    for ha, ua, va in map(None, mux['HA'], mux['UA'], mux['VA']):
4842        if j >= mux_times_start_i and j < mux_times_fin_i:
4843            stage = zscale*ha + mean_stage
4844            h = stage - elevation
4845            xmomentum = ua*h
4846            ymomentum = -1*va*h # -1 since in mux files south is positive.
4847            sww.store_quantities(outfile, 
4848                                 slice_index=j - mux_times_start_i,
4849                                 verbose=verbose,
4850                                 stage=stage,
4851                                 xmomentum=xmomentum,
4852                                 ymomentum=ymomentum,
4853                                 sww_precision=Float)
4854        j += 1
4855    if verbose: sww.verbose_quantities(outfile)
4856    outfile.close()
4857    #
4858    # Do some conversions while writing the sww file
4859
4860    ##################################
4861    # READ MUX2 FILES line of points #
4862    ##################################
4863
4864WAVEHEIGHT_MUX2_LABEL = '-z-mux2'
4865EAST_VELOCITY_MUX2_LABEL =  '-e-mux2'
4866NORTH_VELOCITY_MUX2_LABEL =  '-n-mux2'   
4867
4868def read_mux2_py(filenames,
4869                 weights,
4870                 permutation=None,
4871                 verbose=False):
4872    """Access the mux files specified in the filenames list. Combine the
4873       data found therin as a weighted linear sum as specifed by the weights.
4874       If permutation is None or empty extract timeseries data for all gauges within the
4875       files.
4876         
4877       Input:
4878           filenames:   List of filenames specifiying the file containing the
4879                        timeseries data (mux2 format) for each source
4880           weights:     Weights associated with each source
4881           permutation: Specifies the gauge numbers that for which data is to be
4882                        extracted
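
       Example (sketch; assumes the mux2 files exist):

           times, lat, lon, elev, stage, starttime = read_mux2_py(
               ['sourceA-z-mux2', 'sourceB-z-mux2'],
               weights=ensure_numeric([1.0, 0.5]))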
4883    """ 
4884
4885    from Numeric import ones,Float,compress,zeros,arange
4886    from urs_ext import read_mux2
4887
4888    numSrc=len(filenames)
4889
4890    file_params=-1*ones(3,Float)#[nsta,dt,nt]
4891   
4892    # Convert verbose to int C flag
4893    if verbose:
4894        verbose=1
4895    else:
4896        verbose=0
4897       
4898       
4899    #msg = 'I got the permutation vector:' + str(permutation)
4900    #assert permutation is not None, msg
4901    if permutation is None:
4902        permutation = ensure_numeric([], Float)   
4903
4904    # Call underlying C implementation urs2sts_ext.c   
4905    data=read_mux2(numSrc,filenames,weights,file_params,permutation,verbose)
4906
4907    msg='File parameter values were not read in correctly from c file'
4908    assert len(compress(file_params>0,file_params))!=0,msg
4909   
    msg='The number of stations specified in the c array and in the file are inconsistent'
    assert file_params[0] >= len(permutation), msg
4912   
4913    msg='The number of stations returned is inconsistent with the requested number'   
4914    assert len(permutation) == 0 or len(permutation) == data.shape[0], msg
4915   
4916    nsta=int(file_params[0])
4917    msg='Must have at least one station'
4918    assert nsta>0,msg
4919   
4920    dt=file_params[1]
    msg='Must have a positive timestep'
4922    assert dt>0,msg
4923   
4924    nt=int(file_params[2])
4925    msg='Must have at least one gauge value'
4926    assert nt>0,msg
4927   
4928    OFFSET=5 #number of site parameters p passed back with data
4929    #p=[geolat,geolon,depth,start_tstep,finish_tstep]
4930
4931    times=dt*arange(1,(data.shape[1]-OFFSET)+1)
4932    latitudes=zeros(data.shape[0],Float)
4933    longitudes=zeros(data.shape[0],Float)
4934    elevation=zeros(data.shape[0],Float)
4935    quantity=zeros((data.shape[0],data.shape[1]-OFFSET),Float)
4936    starttime=1e16
4937    for i in range(0,data.shape[0]):
4938        latitudes[i]=data[i][data.shape[1]-OFFSET]
4939        longitudes[i]=data[i][data.shape[1]-OFFSET+1]
4940        elevation[i]=-data[i][data.shape[1]-OFFSET+2]
4941        quantity[i]=data[i][:-OFFSET]
4942        starttime=min(dt*data[i][data.shape[1]-OFFSET+3],starttime)
4943       
4944    return times, latitudes, longitudes, elevation, quantity, starttime
4945
4946def mux2sww_time(mux_times, mint, maxt):
4947    """
4948    """
4949
    if mint is None:
4951        mux_times_start_i = 0
4952    else:
4953        mux_times_start_i = searchsorted(mux_times, mint)
4954       
    if maxt is None:
4956        mux_times_fin_i = len(mux_times)
4957    else:
4958        maxt += 0.5 # so if you specify a time where there is
4959                    # data that time will be included
4960        mux_times_fin_i = searchsorted(mux_times, maxt)
4961
4962    return mux_times_start_i, mux_times_fin_i
4963
4964
4965def urs2sts(basename_in, basename_out=None, 
4966            weights=None,
4967            verbose=False, 
4968            origin=None,
4969            mean_stage=0.0, 
4970            zscale=1.0,
4971            ordering_filename=None):
4972    """Convert URS mux2 format for wave propagation to sts format
4973
4974    Also convert latitude and longitude to UTM. All coordinates are
4975    assumed to be given in the GDA94 datum
4976
4977    origin is a 3-tuple with geo referenced
4978    UTM coordinates (zone, easting, northing)
4979   
4980    inputs:
4981       
4982    basename_in: list of source file prefixes
4983   
4984        These are combined with the extensions:
4985        WAVEHEIGHT_MUX2_LABEL = '-z-mux2' for stage
4986        EAST_VELOCITY_MUX2_LABEL =  '-e-mux2' xmomentum
4987        NORTH_VELOCITY_MUX2_LABEL =  '-n-mux2' and ymomentum
4988   
        to create a 2D list of mux2 files. The rows are associated with each
        quantity and must have the above extensions;
        the columns are the list of file prefixes.
   
    ordering_filename: a .txt file name specifying which mux2 gauge points are
              to be stored. This is indicated by the index of the gauge
              in the ordering file.
4996   
4997              ordering file format:
4998              1st line:    'index,longitude,latitude\n'
4999              other lines: index,longitude,latitude
5000             
              If ordering_filename is None or the ordering file is empty
              then all points are taken in the order they
              appear in the mux2 file.
5004   
5005   
5006    output:
5007      basename_out: name of sts file in which mux2 data is stored.
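
    Example (sketch; assumes the mux2 files and an ordering file exist):

        urs2sts(['sourceA', 'sourceB'],
                basename_out='gauges',
                weights=[1.0, 0.5],
                ordering_filename='ordering.txt')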
5008   
5009   
5010    """
5011    import os
5012    from Scientific.IO.NetCDF import NetCDFFile
5013    from Numeric import Float, Int, Int32, searchsorted, zeros, array
5014    from Numeric import allclose, around,ones,Float
5015    from types import ListType,StringType
5016    from operator import __and__
5017   
5018    if not isinstance(basename_in, ListType):
5019        if verbose: print 'Reading single source'
5020        basename_in=[basename_in]
5021
5022    # Check that basename is a list of strings
5023    if not reduce(__and__, map(lambda z:isinstance(z,StringType), basename_in)):
5024        msg= 'basename_in must be a string or list of strings'
5025        raise Exception, msg
5026
5027    # Find the number of sources to be used
5028    numSrc=len(basename_in)
5029
5030    # A weight must be specified for each source
5031    if weights is None:
5032        # Default is equal weighting
5033        weights=ones(numSrc,Float)/numSrc
5034    else:
5035        weights = ensure_numeric(weights)
        msg = 'When combining multiple sources must specify a weight for '+\
              'each mux2 source file'
5038        assert len(weights) == numSrc, msg
5039       
5040    if verbose:
5041        print 'Weights used in urs2sts:', weights
5042
5043    # Check output filename   
5044    if basename_out is None:
5045        msg = 'STS filename must be specified as basename_out'
5046        msg += ' in function urs2sts'
5047        raise Exception, msg
5048   
5049    if basename_out.endswith('.sts'):
5050        stsname = basename_out
5051    else:   
5052        stsname = basename_out + '.sts'       
5053
5054    # Create input filenames from basenames and check their existence
5055    files_in=[[],[],[]]
5056    for files in basename_in:
5057        files_in[0].append(files + WAVEHEIGHT_MUX2_LABEL),
5058        files_in[1].append(files + EAST_VELOCITY_MUX2_LABEL)
5059        files_in[2].append(files + NORTH_VELOCITY_MUX2_LABEL)
5060   
5061    quantities = ['HA','UA','VA'] # Quantity names used in the MUX2 format
5062    for i in range(len(quantities)): 
5063        for file_in in files_in[i]:
5064            if (os.access(file_in, os.F_OK) == 0):
5065                msg = 'File %s does not exist or is not accessible' %file_in
5066                raise IOError, msg
5067
5068    # Establish permutation array
5069    if ordering_filename is not None:
5070
5071        if verbose is True:
5072            print 'Reading ordering file', ordering_filename
5073        # Read ordering file
5074        try:
5075            fid=open(ordering_filename, 'r')
5076            file_header=fid.readline().split(',')
5077            ordering_lines=fid.readlines()
5078            fid.close()
5079        except:
5080            msg = 'Cannot open %s'%ordering_filename
5081            raise Exception, msg
5082
5083        reference_header = 'index, longitude, latitude\n'
5084        reference_header_split = reference_header.split(',')
5085        for i in range(3):
5086            if not file_header[i].strip() == reference_header_split[i].strip():
5087                msg = 'File must contain header: '+reference_header
5088                raise Exception, msg
5089
5090        if len(ordering_lines)<2:
5091            msg = 'File must contain at least two points'
5092            raise Exception, msg
5093
5094        permutation = ensure_numeric([int(line.split(',')[0]) for line in ordering_lines])
5095    else:
5096        permutation = None
5097
5098
5099    #print 'permutation', permutation
5100    # Read MUX2 files
    if verbose: print 'Reading mux2 files'
5102    mux={}
5103    for i, quantity in enumerate(quantities):
5104   
5105        # For each quantity read the associated list of source mux2 file with
        # extension associated with that quantity
5107        times,\
5108        latitudes,\
5109        longitudes,\
5110        elevation,\
5111        mux[quantity],\
5112        starttime = read_mux2_py(files_in[i], weights, permutation, verbose=verbose)
5113   
5114        # Check that all quantities have consistent time and space information     
5115        if quantity!=quantities[0]:
5116            msg='%s, %s and %s have inconsistent gauge data'\
5117                %(files_in[0], files_in[1], files_in[2])
5118            assert allclose(times, times_old), msg
5119            assert allclose(latitudes, latitudes_old), msg
5120            assert allclose(longitudes, longitudes_old), msg
5121            assert allclose(elevation, elevation_old), msg
5122            assert allclose(starttime, starttime_old), msg
5123        times_old=times
5124        latitudes_old=latitudes
5125        longitudes_old=longitudes
5126        elevation_old=elevation
5127        starttime_old=starttime
5128
5129
5130       
5131       
5132       
5133        # Self check - can be removed to improve speed
5134
5135        #ref_longitudes = [float(line.split(',')[1]) for line in ordering_lines]               
5136        #ref_latitudes = [float(line.split(',')[2]) for line in ordering_lines]       
5137        #
5138        #msg = 'Longitudes specified in ordering file do not match those found in mux files'
5139        #msg += 'I got %s instead of %s (only beginning shown)'\
5140        #    %(str(longitudes[:10]) + '...',
5141        #      str(ref_longitudes[:10]) + '...')       
5142        #assert allclose(longitudes, ref_longitudes), msg       
5143        #
5144        #msg = 'Latitudes specified in ordering file do not match those found in mux files. '
5145        #msg += 'I got %s instead of %s (only beginning shown)'\
5146        #    %(str(latitudes[:10]) + '...',
5147        #      str(ref_latitudes[:10]) + '...')               
5148        #assert allclose(latitudes, ref_latitudes), msg
5149       
5150               
5151       
5152    # Store timeseries in NetCDF STS file   
    msg='File is empty and/or clipped region not in file region'
    assert len(latitudes) > 0, msg
5155
5156    number_of_points = latitudes.shape[0]      # Number of stations retrieved
5157    number_of_times = times.shape[0]           # Number of timesteps
5158    number_of_latitudes = latitudes.shape[0]   # Number latitudes
5159    number_of_longitudes = longitudes.shape[0] # Number longitudes
5160
5161    # NetCDF file definition
5162    outfile = NetCDFFile(stsname, 'w')
5163
5164    description = 'Converted from URS mux2 files: %s'\
5165                  %(basename_in)
5166   
5167    # Create new file
5168    sts = Write_sts()
5169
5170    sts.store_header(outfile, 
5171                     times+starttime,
5172                     number_of_points, 
5173                     description=description,
5174                     verbose=verbose,
5175                     sts_precision=Float)
5176   
5177    # Store
5178    from anuga.coordinate_transforms.redfearn import redfearn
5179    x = zeros(number_of_points, Float)  # Easting
5180    y = zeros(number_of_points, Float)  # Northing
5181
5182    # Check zone boundaries
5183    refzone, _, _ = redfearn(latitudes[0],longitudes[0]) 
5184
5185    for i in range(number_of_points):
5186        zone, easting, northing = redfearn(latitudes[i],longitudes[i])
5187        x[i] = easting
5188        y[i] = northing
5189        msg='all sts gauges need to be in the same zone'
5190        assert zone==refzone,msg
5191
5192    if origin is None:
5193        origin = Geo_reference(refzone,min(x),min(y))
5194    geo_ref = write_NetCDF_georeference(origin, outfile)
5195
5196
5197   
5198    #print geo_ref.get_xllcorner()
5199    #print geo_ref.get_yllcorner()
5200
5201    elevation = resize(elevation,outfile.variables['elevation'][:].shape)
5202    outfile.variables['x'][:] = x - geo_ref.get_xllcorner()
5203    outfile.variables['y'][:] = y - geo_ref.get_yllcorner()
5204    outfile.variables['elevation'][:] = elevation
5205
5206    stage = outfile.variables['stage']
5207    xmomentum = outfile.variables['xmomentum']
5208    ymomentum = outfile.variables['ymomentum']
5209
5210    if verbose: print 'Converting quantities'
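    # Stage is the scaled wave height plus the mean stage;
    # momenta are velocities times the water depth h = stage - elevation.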
5211    for j in range(len(times)):
5212        for i in range(number_of_points):
5213            w = zscale*mux['HA'][i,j] + mean_stage
            h = w - elevation[i]
5215            stage[j,i] = w
5216
5217            xmomentum[j,i] = mux['UA'][i,j]*h
5218            ymomentum[j,i] = mux['VA'][i,j]*h
5219
5220    outfile.close()
5221
5222def create_sts_boundary(stsname):
5223    """
5224        Create boundary segments from .sts file. Points can be stored in
5225    arbitrary order within the .sts file. The order in which the .sts points
5226    make up the boundary are given in order.txt file
5227   
5228    FIXME:
5229    Point coordinates are stored in relative eastings and northings.
5230    But boundary is produced in absolute coordinates
5231   
5232    """
    try:
        fid = NetCDFFile(stsname+'.sts', 'r')
    except:
        msg = 'Cannot open %s.sts' %stsname
        raise IOError, msg
5237
5238
5239    xllcorner = fid.xllcorner[0]
5240    yllcorner = fid.yllcorner[0]
5241    #Points stored in sts file are normalised to [xllcorner,yllcorner] but
5242    #we cannot assume that boundary polygon will be. At least the
5243    #additional points specified by the user after this function is called
5244    x = fid.variables['x'][:]+xllcorner
5245    y = fid.variables['y'][:]+yllcorner
5246
5247    x = reshape(x, (len(x),1))
5248    y = reshape(y, (len(y),1))
5249    sts_points = concatenate((x,y), axis=1)
5250
5251    return sts_points.tolist()
5252
5253class Write_sww:
5254    from anuga.shallow_water.shallow_water_domain import Domain
5255
5256    # FIXME (Ole): Hardwiring the conserved quantities like
5257    # this could be a problem. I would prefer taking them from
5258    # the instantiation of Domain.
5259    #
5260    # (DSG) There is not always a Domain instance when Write_sww is used.
5261    # Check to see if this is the same level of hardwiring as is in
    # shallow water domain.
5263   
5264    sww_quantities = Domain.conserved_quantities
5265
5266
5267    RANGE = '_range'
5268    EXTREMA = ':extrema'
5269
5270    def __init__(self):
5271        pass
5272   
5273    def store_header(self,
5274                     outfile,
5275                     times,
5276                     number_of_volumes,
5277                     number_of_points,
5278                     description='Converted from XXX',
5279                     smoothing=True,
5280                     order=1,
5281                     sww_precision=Float32,
5282                     verbose=False):
5283        """
        outfile - the open NetCDF file that will be written to
5285        times - A list of the time slice times OR a start time
5286        Note, if a list is given the info will be made relative.
5287        number_of_volumes - the number of triangles
5288        """
5289   
5290        outfile.institution = 'Geoscience Australia'
5291        outfile.description = description
5292
5293        # For sww compatibility
5294        if smoothing is True:
            # Smoothing to be deprecated
5296            outfile.smoothing = 'Yes'
5297            outfile.vertices_are_stored_uniquely = 'False'
5298        else:
            # Smoothing to be deprecated
5300            outfile.smoothing = 'No'
5301            outfile.vertices_are_stored_uniquely = 'True'
5302        outfile.order = order
5303
5304        try:
5305            revision_number = get_revision_number()
5306        except:
5307            revision_number = None
5308        # Allow None to be stored as a string               
5309        outfile.revision_number = str(revision_number) 
5310
5311
5312       
5313        # times - A list or array of the time slice times OR a start time
5314        # times = ensure_numeric(times)
5315        # Start time in seconds since the epoch (midnight 1/1/1970)
5316
        # This is used to separate a single start time from a list of times.
        # What it is actually doing is distinguishing lists and numeric
        # arrays from a scalar.
5319        if type(times) is list or type(times) is ArrayType: 
5320            number_of_times = len(times)
5321            times = ensure_numeric(times) 
5322            if number_of_times == 0:
5323                starttime = 0
5324            else:
5325                starttime = times[0]
5326                times = times - starttime  #Store relative times
5327        else:
5328            number_of_times = 0
5329            starttime = times
5330            #times = ensure_numeric([])
5331        outfile.starttime = starttime
5332        # dimension definitions
5333        outfile.createDimension('number_of_volumes', number_of_volumes)
5334        outfile.createDimension('number_of_vertices', 3)
5335        outfile.createDimension('numbers_in_range', 2)
5336   
5337        if smoothing is True:
5338            outfile.createDimension('number_of_points', number_of_points)
5339       
            # FIXME(Ole): This will cause sww files for parallel domains to
5341            # have ghost nodes stored (but not used by triangles).
5342            # To clean this up, we have to change get_vertex_values and
5343            # friends in quantity.py (but I can't be bothered right now)
5344        else:
5345            outfile.createDimension('number_of_points', 3*number_of_volumes)
5346        outfile.createDimension('number_of_timesteps', number_of_times)
5347
5348        # variable definitions
5349        outfile.createVariable('x', sww_precision, ('number_of_points',))
5350        outfile.createVariable('y', sww_precision, ('number_of_points',))
5351        outfile.createVariable('elevation', sww_precision, ('number_of_points',))
5352        q = 'elevation'
5353        outfile.createVariable(q+Write_sww.RANGE, sww_precision,
5354                               ('numbers_in_range',))
5355
5356
5357        # Initialise ranges with small and large sentinels.
5358        # If this was in pure Python we could have used None sensibly
5359        outfile.variables[q+Write_sww.RANGE][0] = max_float  # Min
5360        outfile.variables[q+Write_sww.RANGE][1] = -max_float # Max
5361
5362        # FIXME: Backwards compatibility
5363        outfile.createVariable('z', sww_precision, ('number_of_points',))
5364        #################################
5365
5366        outfile.createVariable('volumes', Int, ('number_of_volumes',
5367                                                'number_of_vertices'))
5368        # Doing sww_precision instead of Float gives cast errors.
5369        outfile.createVariable('time', Float,
5370                               ('number_of_timesteps',))
5371       
5372        for q in Write_sww.sww_quantities:
5373            outfile.createVariable(q, sww_precision,
5374                                   ('number_of_timesteps',
5375                                    'number_of_points')) 
5376            outfile.createVariable(q+Write_sww.RANGE, sww_precision,
5377                                   ('numbers_in_range',))
5378
5379            # Initialise ranges with small and large sentinels.
5380            # If this was in pure Python we could have used None sensibly
5381            outfile.variables[q+Write_sww.RANGE][0] = max_float  # Min
5382            outfile.variables[q+Write_sww.RANGE][1] = -max_float # Max
5383           
5384        if type(times) is list or type(times) is ArrayType: 
5385            outfile.variables['time'][:] = times    #Store time relative
5386           
5387        if verbose:
5388            print '------------------------------------------------'
5389            print 'Statistics:'
5390            print '    t in [%f, %f], len(t) == %d'\
5391                  %(min(times.flat), max(times.flat), len(times.flat))
5392
5393       
5394    def store_triangulation(self,
5395                            outfile,
5396                            points_utm,
5397                            volumes,
5398                            elevation, zone=None, new_origin=None, 
5399                            points_georeference=None, verbose=False):
5400        """
5401       
        new_origin - a georeference that the points can be set to. (Maybe
5403        do this before calling this function.)
5404       
5405        points_utm - currently a list or array of the points in UTM.
5406        points_georeference - the georeference of the points_utm
5407       
5408        How about passing new_origin and current_origin.
        If you get both, do a conversion from the old to the new.
5410       
5411        If you only get new_origin, the points are absolute,
5412        convert to relative
5413       
5414        if you only get the current_origin the points are relative, store
5415        as relative.
5416       
5417        if you get no georefs create a new georef based on the minimums of
5418        points_utm.  (Another option would be to default to absolute)
5419       
5420        Yes, and this is done in another part of the code.
5421        Probably geospatial.
5422       
5423        If you don't supply either geo_refs, then supply a zone. If not
5424        the default zone will be used.
5425       
5426       
        precondition:
5428       
5429        header has been called.
5430        """
5431       
5432        number_of_points = len(points_utm)   
5433        volumes = array(volumes) 
5434        points_utm = array(points_utm)
5435
5436        # given the two geo_refs and the points, do the stuff
5437        # described in the method header
5438        # if this is needed else where, pull out as a function
5439        points_georeference = ensure_geo_reference(points_georeference)
5440        new_origin = ensure_geo_reference(new_origin)
5441        if new_origin is None and points_georeference is not None:
5442            points = points_utm
5443            geo_ref = points_georeference
5444        else:
5445            if new_origin is None:
5446                new_origin = Geo_reference(zone,min(points_utm[:,0]),
5447                                           min(points_utm[:,1]))
5448            points = new_origin.change_points_geo_ref(points_utm,
5449                                                      points_georeference)
5450            geo_ref = new_origin
5451
5452        # At this stage I need a georef and points
5453        # the points are relative to the georef
5454        geo_ref.write_NetCDF(outfile)
5455   
5456        # This will put the geo ref in the middle
5457        #geo_ref=Geo_reference(refzone,(max(x)+min(x))/2.0,(max(x)+min(y))/2.)
5458       
5459        x =  points[:,0]
5460        y =  points[:,1]
5461        z = outfile.variables['z'][:]
5462   
5463        if verbose:
5464            print '------------------------------------------------'
5465            print 'More Statistics:'
5466            print '  Extent (/lon):'
5467            print '    x in [%f, %f], len(lat) == %d'\
5468                  %(min(x), max(x),
5469                    len(x))
5470            print '    y in [%f, %f], len(lon) == %d'\
5471                  %(min(y), max(y),
5472                    len(y))
5473            print '    z in [%f, %f], len(z) == %d'\
5474                  %(min(elevation), max(elevation),
5475                    len(elevation))
5476            print 'geo_ref: ',geo_ref
5477            print '------------------------------------------------'
5478           
5479        #z = resize(bath_grid,outfile.variables['z'][:].shape)
5480        #print "points[:,0]", points[:,0]
5481        outfile.variables['x'][:] = points[:,0] #- geo_ref.get_xllcorner()
5482        outfile.variables['y'][:] = points[:,1] #- geo_ref.get_yllcorner()
5483        outfile.variables['z'][:] = elevation
5484        outfile.variables['elevation'][:] = elevation  #FIXME HACK
5485        outfile.variables['volumes'][:] = volumes.astype(Int32) #On Opteron 64
5486
5487        q = 'elevation'
5488        # This updates the _range values
5489        outfile.variables[q+Write_sww.RANGE][0] = min(elevation)
5490        outfile.variables[q+Write_sww.RANGE][1] = max(elevation)
5491
5492
5493    def store_quantities(self, outfile, sww_precision=Float32,
5494                         slice_index=None, time=None,
5495                         verbose=False, **quant):
5496        """
5497        Write the quantity info.
5498
5499        **quant is extra keyword arguments passed in. These must be
5500          the sww quantities, currently; stage, xmomentum, ymomentum.
5501       
        If the time array has already been built, use the slice_index
        to specify the index.
5504       
5505        Otherwise, use time to increase the time dimension
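
        Example (sketch): a typical call order for Write_sww is

            sww = Write_sww()
            sww.store_header(outfile, times, number_of_volumes,
                             number_of_points)
            sww.store_triangulation(outfile, points_utm, volumes,
                                    elevation, zone)
            sww.store_quantities(outfile, slice_index=0,
                                 stage=stage, xmomentum=xmom, ymomentum=ymom)

        where stage, xmom and ymom are assumed to be precomputed arrays.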
5506
5507        Maybe make this general, but the viewer assumes these quantities,
5508        so maybe we don't want it general - unless the viewer is general
5509       
        precondition:
5511        triangulation and
5512        header have been called.
5513        """
5514
5515        if time is not None:
5516            file_time = outfile.variables['time']
5517            slice_index = len(file_time)
5518            file_time[slice_index] = time   
5519
5520        # Write the conserved quantities from Domain.
5521        # Typically stage,  xmomentum, ymomentum
5522        # other quantities will be ignored, silently.
5523        # Also write the ranges: stage_range,
5524        # xmomentum_range and ymomentum_range
5525        for q in Write_sww.sww_quantities:
5526            if not quant.has_key(q):
5527                msg = 'SWW file can not write quantity %s' %q
5528                raise NewQuantity, msg
5529            else:
5530                q_values = quant[q]
5531                outfile.variables[q][slice_index] = \
5532                                q_values.astype(sww_precision)
5533
5534                # This updates the _range values
5535                q_range = outfile.variables[q+Write_sww.RANGE][:]
5536                q_values_min = min(q_values)
5537                if q_values_min < q_range[0]:
5538                    outfile.variables[q+Write_sww.RANGE][0] = q_values_min
5539                q_values_max = max(q_values)
5540                if q_values_max > q_range[1]:
5541                    outfile.variables[q+Write_sww.RANGE][1] = q_values_max
5542
5543    def verbose_quantities(self, outfile):
5544        print '------------------------------------------------'
5545        print 'More Statistics:'
5546        for q in Write_sww.sww_quantities:
            print '    %s in [%f, %f]' %(q,
5548                                       outfile.variables[q+Write_sww.RANGE][0],
5549                                       outfile.variables[q+Write_sww.RANGE][1])
5550        print '------------------------------------------------'
5551
5552
5553       
5554def obsolete_write_sww_time_slices(outfile, has, uas, vas, elevation,
5555                         mean_stage=0, zscale=1,
5556                         verbose=False):   
5557    #Time stepping
5558    stage = outfile.variables['stage']
5559    xmomentum = outfile.variables['xmomentum']
5560    ymomentum = outfile.variables['ymomentum']
5561
5562    n = len(has)
5563    j=0
5564    for ha, ua, va in map(None, has, uas, vas):
5565        if verbose and j%((n+10)/10)==0: print '  Doing %d of %d' %(j, n)
5566        w = zscale*ha + mean_stage
5567        stage[j] = w
5568        h = w - elevation
5569        xmomentum[j] = ua*h
        ymomentum[j] = -1*va*h #  -1 since in mux files south is positive.
5571        j += 1
5572   
5573def urs2txt(basename_in, location_index=None):
5574    """
5575    Not finished or tested
5576    """
5577   
5578    files_in = [basename_in + WAVEHEIGHT_MUX_LABEL,
5579                basename_in + EAST_VELOCITY_LABEL,
5580                basename_in + NORTH_VELOCITY_LABEL]
5581    quantities = ['HA','UA','VA']
5582
5583    d = ","
5584   
    # Instantiate Urs_points for each of the three mux files.
5586    mux = {}
5587    for quantity, file in map(None, quantities, files_in):
5588        mux[quantity] = Urs_points(file)
5589       
5590    # Could check that the depth is the same. (hashing)
5591
5592    # handle to a mux file to do depth stuff
5593    a_mux = mux[quantities[0]]
5594   
5595    # Convert to utm
5596    latitudes = a_mux.lonlatdep[:,1]
5597    longitudes = a_mux.lonlatdep[:,0]
5598    points_utm, zone = convert_from_latlon_to_utm( \
5599        latitudes=latitudes, longitudes=longitudes)
5600    #print "points_utm", points_utm
5601    #print "zone", zone
    depths = a_mux.lonlatdep[:,2]  # Depths in metres
5603   
5604    fid = open(basename_in + '.txt', 'w')
5605
5606    fid.write("zone: " + str(zone) + "\n")
5607
5608    if location_index is not None:
5609        #Title
5610        li = location_index
5611        fid.write('location_index'+d+'lat'+d+ 'long' +d+ 'Easting' +d+ \
5612                  'Northing' + "\n")
5613        fid.write(str(li) +d+ str(latitudes[li])+d+ \
5614              str(longitudes[li]) +d+ str(points_utm[li][0]) +d+ \
              str(points_utm[li][1]) + "\n")
5616
5617    # the non-time dependent stuff
5618    #Title
5619    fid.write('location_index'+d+'lat'+d+ 'long' +d+ 'Easting' +d+ \
5620              'Northing' +d+ 'depth m' + "\n")
5621    i = 0
5622    for depth, point_utm, lat, long in map(None, depths,
5623                                               points_utm, latitudes,
5624                                               longitudes):
5625       
5626        fid.write(str(i) +d+ str(lat)+d+ str(long) +d+ str(point_utm[0]) +d+ \
                  str(point_utm[1]) +d+ str(depth) + "\n")
5628        i +=1
5629    #Time dependent
5630    if location_index is not None:
5631        time_step = a_mux.time_step
5632        i = 0
5633        #Title
5634        fid.write('time' +d+ 'HA depth m'+d+ \
5635                 'UA momentum East x m/sec' +d+ 'VA momentum North y m/sec' \
5636                      + "\n")
5637        for HA, UA, VA in map(None, mux['HA'], mux['UA'], mux['VA']):
5638            fid.write(str(i*time_step) +d+ str(HA[location_index])+d+ \
5639                      str(UA[location_index]) +d+ str(VA[location_index]) \
5640                      + "\n")
5641           
5642            i +=1
5643
5644class Write_sts:
5645
5646    sts_quantities = ['stage','xmomentum','ymomentum']
5647
5648
5649    RANGE = '_range'
5650    EXTREMA = ':extrema'
5651   
5652    def __init__(self):
5653        pass
5654
5655    def store_header(self,
5656                     outfile,
5657                     times,
5658                     number_of_points,
5659                     description='Converted from URS mux2 format',
5660                     sts_precision=Float32,
5661                     verbose=False):
5662        """
        outfile - the open NetCDF file that will be written to
5664        times - A list of the time slice times OR a start time
5665        Note, if a list is given the info will be made relative.
        number_of_points - the number of urs gauge sites
5667        """
5668
5669        outfile.institution = 'Geoscience Australia'
5670        outfile.description = description
5671
5672        try:
5673            revision_number = get_revision_number()
5674        except:
5675            revision_number = None
5676        # Allow None to be stored as a string               
5677        outfile.revision_number = str(revision_number) 
5678       
5679        # times - A list or array of the time slice times OR a start time
5680        # Start time in seconds since the epoch (midnight 1/1/1970)
5681
        # This is used to separate a single start time from a list of times.
        # What it is actually doing is distinguishing lists and numeric
        # arrays from a scalar.
5684        if type(times) is list or type(times) is ArrayType: 
5685            number_of_times = len(times)
5686            times = ensure_numeric(times) 
5687            if number_of_times == 0:
5688                starttime = 0
5689            else:
5690                starttime = times[0]
5691                times = times - starttime  #Store relative times
5692        else:
5693            number_of_times = 0
5694            starttime = times
5695
5696        outfile.starttime = starttime
5697
5698        # Dimension definitions
5699        outfile.createDimension('number_of_points', number_of_points)
5700        outfile.createDimension('number_of_timesteps', number_of_times)
5701        outfile.createDimension('numbers_in_range', 2)
5702
5703        # Variable definitions
5704        outfile.createVariable('x', sts_precision, ('number_of_points',))
5705        outfile.createVariable('y', sts_precision, ('number_of_points',))
5706        outfile.createVariable('elevation', sts_precision,('number_of_points',))
5707
5708        q = 'elevation'
5709        outfile.createVariable(q+Write_sts.RANGE, sts_precision,
5710                               ('numbers_in_range',))
5711
5712        # Initialise ranges with small and large sentinels.
5713        # If this was in pure Python we could have used None sensibly
5714        outfile.variables[q+Write_sts.RANGE][0] = max_float  # Min
5715        outfile.variables[q+Write_sts.RANGE][1] = -max_float # Max
5716
5717        # Doing sts_precision instead of Float gives cast errors.
5718        outfile.createVariable('time', Float, ('number_of_timesteps',))
5719
5720        for q in Write_sts.sts_quantities:
5721            outfile.createVariable(q, sts_precision,
5722                                   ('number_of_timesteps',
5723                                    'number_of_points'))
5724            outfile.createVariable(q+Write_sts.RANGE, sts_precision,
5725                                   ('numbers_in_range',))
5726            # Initialise ranges with small and large sentinels.
5727            # If this was in pure Python we could have used None sensibly
5728            outfile.variables[q+Write_sts.RANGE][0] = max_float  # Min
5729            outfile.variables[q+Write_sts.RANGE][1] = -max_float # Max
5730
5731        if type(times) is list or type(times) is ArrayType: 
5732            outfile.variables['time'][:] = times    #Store time relative
5733
5734        if verbose:
5735            print '------------------------------------------------'
5736            print 'Statistics:'
5737            print '    t in [%f, %f], len(t) == %d'\
5738                  %(min(times.flat), max(times.flat), len(times.flat))
5739
5740    def store_points(self,
5741                     outfile,
5742                     points_utm,
5743                     elevation, zone=None, new_origin=None, 
5744                     points_georeference=None, verbose=False):
5745
5746        """
5747        points_utm - currently a list or array of the points in UTM.
5748        points_georeference - the georeference of the points_utm
5749
5750        How about passing new_origin and current_origin.
        If you get both, do a conversion from the old to the new.
5752       
5753        If you only get new_origin, the points are absolute,
5754        convert to relative
5755       
5756        if you only get the current_origin the points are relative, store
5757        as relative.
5758       
5759        if you get no georefs create a new georef based on the minimums of
5760        points_utm.  (Another option would be to default to absolute)
5761       
5762        Yes, and this is done in another part of the code.
5763        Probably geospatial.
5764       
5765        If you don't supply either geo_refs, then supply a zone. If not
5766        the default zone will be used.
5767
5768        precondition:
5769             header has been called.
5770        """
5771
5772        number_of_points = len(points_utm)
5773        points_utm = array(points_utm)
5774
5775        # given the two geo_refs and the points, do the stuff
5776        # described in the method header
5777        points_georeference = ensure_geo_reference(points_georeference)
5778        new_origin = ensure_geo_reference(new_origin)
5779       
5780        if new_origin is None and points_georeference is not None:
5781            points = points_utm
5782            geo_ref = points_georeference
5783        else:
5784            if new_origin is None:
5785                new_origin = Geo_reference(zone,min(points_utm[:,0]),
5786                                           min(points_utm[:,1]))
5787            points = new_origin.change_points_geo_ref(points_utm,
5788                                                      points_georeference)
5789            geo_ref = new_origin
5790
5791        # At this stage I need a georef and points
5792        # the points are relative to the georef
5793        geo_ref.write_NetCDF(outfile)
5794
5795        x =  points[:,0]
5796        y =  points[:,1]
5797        z = outfile.variables['z'][:]
5798   
5799        if verbose:
5800            print '------------------------------------------------'
5801            print 'More Statistics:'
5802            print '  Extent (/lon):'
5803            print '    x in [%f, %f], len(lat) == %d'\
5804                  %(min(x), max(x),
5805                    len(x))
5806            print '    y in [%f, %f], len(lon) == %d'\
5807                  %(min(y), max(y),
5808                    len(y))
5809            print '    z in [%f, %f], len(z) == %d'\
5810                  %(min(elevation), max(elevation),
5811                    len(elevation))
5812            print 'geo_ref: ',geo_ref
5813            print '------------------------------------------------'
5814           
5815        #z = resize(bath_grid,outfile.variables['z'][:].shape)
5816        #print "points[:,0]", points[:,0]
5817        outfile.variables['x'][:] = points[:,0] #- geo_ref.get_xllcorner()
5818        outfile.variables['y'][:] = points[:,1] #- geo_ref.get_yllcorner()
5819        outfile.variables['z'][:] = elevation
5820        outfile.variables['elevation'][:] = elevation  #FIXME HACK4
5821
5822        q = 'elevation'
5823        # This updates the _range values
5824        outfile.variables[q+Write_sts.RANGE][0] = min(elevation)
5825        outfile.variables[q+Write_sts.RANGE][1] = max(elevation)
5826
5827    def store_quantities(self, outfile, sts_precision=Float32,
5828                         slice_index=None, time=None,
5829                         verbose=False, **quant):
5830       
5831        """
5832        Write the quantity info.
5833
5834        **quant is extra keyword arguments passed in. These must be
5835          the sts quantities, currently; stage.
5836       
        If the time array has already been built, use the slice_index
5838        to specify the index.
5839       
5840        Otherwise, use time to increase the time dimension
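
        Example (sketch): a typical call order for Write_sts is

            sts = Write_sts()
            sts.store_header(outfile, times, number_of_points)
            sts.store_points(outfile, points_utm, elevation, zone)
            sts.store_quantities(outfile, slice_index=0,
                                 stage=stage, xmomentum=xmom, ymomentum=ymom)

        where stage, xmom and ymom are assumed to be precomputed arrays.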
5841
5842        Maybe make this general, but the viewer assumes these quantities,
5843        so maybe we don't want it general - unless the viewer is general
5844       
5845        precondition:
5846            triangulation and
5847            header have been called.
5848        """
5849        if time is not None:
5850            file_time = outfile.variables['time']
5851            slice_index = len(file_time)
5852            file_time[slice_index] = time   
5853
5854        # Write the conserved quantities from Domain.
5855        # Typically stage,  xmomentum, ymomentum
5856        # other quantities will be ignored, silently.
5857        # Also write the ranges: stage_range
5858        for q in Write_sts.sts_quantities:
5859            if not quant.has_key(q):
5860                msg = 'STS file can not write quantity %s' %q
5861                raise NewQuantity, msg
5862            else:
5863                q_values = quant[q]
5864                outfile.variables[q][slice_index] = \
5865                                q_values.astype(sts_precision)
5866
5867                # This updates the _range values
5868                q_range = outfile.variables[q+Write_sts.RANGE][:]
5869                q_values_min = min(q_values)
5870                if q_values_min < q_range[0]:
5871                    outfile.variables[q+Write_sts.RANGE][0] = q_values_min
5872                q_values_max = max(q_values)
5873                if q_values_max > q_range[1]:
5874                    outfile.variables[q+Write_sts.RANGE][1] = q_values_max
5875
5876
5877   
5878class Urs_points:
5879    """
5880    Read the info in URS mux files.
5881
    For the quantities, here is how the file names correlate with
    what they mean:
    z-mux is height above sea level, m
    e-mux is velocity in the easterly direction, m/s
    n-mux is velocity in the northerly direction, m/s
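
    Example (sketch; assumes the mux file exists):

        urs = Urs_points('basename-z-mux')
        for hz in urs:   # one array of length points_num per time step
            first_gauge_value = hz[0]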
5887    """
5888    def __init__(self,urs_file):
5889        self.iterated = False
5890        columns = 3 # long, lat , depth
5891        mux_file = open(urs_file, 'rb')
5892       
5893        # Number of points/stations
5894        (self.points_num,)= unpack('i',mux_file.read(4))
5895       
5896        # nt, int - Number of time steps
5897        (self.time_step_count,)= unpack('i',mux_file.read(4))
5898        #print "self.time_step_count", self.time_step_count
5899        #dt, float - time step, seconds
5900        (self.time_step,) = unpack('f', mux_file.read(4))
5901        #print "self.time_step", self.time_step
5902        msg = "Bad data in the urs file."
5903        if self.points_num < 0:
5904            mux_file.close()
5905            raise ANUGAError, msg
5906        if self.time_step_count < 0:
5907            mux_file.close()
5908            raise ANUGAError, msg
5909        if self.time_step < 0:
5910            mux_file.close()
5911            raise ANUGAError, msg
5912
        # The depth is in metres; it is the distance from the ocean
        # surface to the sea bottom.
5915        lonlatdep = p_array.array('f')
5916        lonlatdep.read(mux_file, columns * self.points_num)
5917        lonlatdep = array(lonlatdep, typecode=Float)   
5918        lonlatdep = reshape(lonlatdep, (self.points_num, columns))
5919        #print 'lonlatdep',lonlatdep
5920        self.lonlatdep = lonlatdep
5921       
5922        self.mux_file = mux_file
5923        # check this array
5924
5925    def __iter__(self):
5926        """
        Iterate over quantity data which is with respect to time.

        Note: You can only iterate once over an object.
       
        Returns quantity information for each time slice.
        """
        msg = "You can only iterate once over a urs file."
5934        assert not self.iterated, msg
5935        self.iter_time_step = 0
5936        self.iterated = True
5937        return self
5938   
5939    def next(self):
5940        if self.time_step_count == self.iter_time_step:
5941            self.close()
5942            raise StopIteration
5943        #Read in a time slice  from mux file 
5944        hz_p_array = p_array.array('f')
5945        hz_p_array.read(self.mux_file, self.points_num)
5946        hz_p = array(hz_p_array, typecode=Float)
5947        self.iter_time_step += 1
5948       
5949        return hz_p
5950
5951    def close(self):
5952        self.mux_file.close()
5953       
5954    #### END URS UNGRIDDED 2 SWW ###
5955
5956       
5957def start_screen_catcher(dir_name=None, myid='', numprocs='', extra_info='',
5958                         verbose=True):
5959    """
    Used to store screen output and errors to file. If run on multiple
    processes each processor will have its own output and error file.
   
    extra_info - a string that can identify outputs with another
    string, eg. '_other'
   
    FIXME: Would be good if you could suppress all the screen output and
    only save it to file... however it seems a bit tricky as this capture
    technique responds to sys.stdout and by then it is already printed out.
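
    Example (sketch):

        start_screen_catcher('screen_output'+sep, myid=0, numprocs=4)
        print 'hello'   # appended to screen_output/screen_output_0_4.txt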
5969    """
5970   
5971    import sys
    if dir_name is None:
        dir_name = getcwd()
5975       
5976    if access(dir_name,W_OK) == 0:
5977        if verbose: print 'Making directory %s' %dir_name
5978      #  if verbose: print "myid", myid
5979        mkdir (dir_name,0777)
5980
    if myid != '':
        myid = '_'+str(myid)
    if numprocs != '':
        numprocs = '_'+str(numprocs)
    if extra_info != '':
        extra_info = '_'+str(extra_info)
5988    screen_output_name = join(dir_name, "screen_output%s%s%s.txt" %(myid,
5989                                                                numprocs,
5990                                                                extra_info))
5991    screen_error_name = join(dir_name,  "screen_error%s%s%s.txt" %(myid,
5992                                                              numprocs,
5993                                                              extra_info))
5994
5995    if verbose: print 'Starting ScreenCatcher, all output will be stored in %s' \
5996                                     %(screen_output_name)
5997    #used to catch screen output to file
5998    sys.stdout = Screen_Catcher(screen_output_name)
5999    sys.stderr = Screen_Catcher(screen_error_name)
6000
6001class Screen_Catcher:
6002    """this simply catches the screen output and stores it to file defined by
6003    start_screen_catcher (above)
6004    """
6005   
6006    def __init__(self, filename):
6007        self.filename = filename
        if exists(self.filename):
            print 'Old existing file "%s" has been deleted' %(self.filename)
            remove(self.filename)
6012
6013    def write(self, stuff):
6014        fid = open(self.filename, 'a')
6015        fid.write(stuff)
6016        fid.close()
6017       
6018# FIXME (DSG): Add unit test, make general, not just 2 files,
6019# but any number of files.
6020def copy_code_files(dir_name, filename1, filename2=None):
6021    """Copies "filename1" and "filename2" to "dir_name". Very useful for
6022    information management
6023    filename1 and filename2 are both absolute pathnames   
6024    """
6025
6026    if access(dir_name,F_OK) == 0:
6027        print 'Make directory %s' %dir_name
6028        mkdir (dir_name,0777)
6029    shutil.copy(filename1, dir_name + sep + basename(filename1))
    if filename2 is not None:
6031        shutil.copy(filename2, dir_name + sep + basename(filename2))
6032        print 'Files %s and %s copied' %(filename1, filename2)
6033    else:
6034        print 'File %s copied' %(filename1)
6035
6036def get_data_from_file(filename,separator_value = ','):
6037    """
6038    Read in data information from file and
6039   
6040    Returns:
6041        header_fields, a string? of the first line separated
6042        by the 'separator_value'
6043       
6044        data, a array (N data columns X M lines) in the file
6045        excluding the header
6046       
6047    NOTE: wont deal with columns with different lenghts and there must be
6048    no blank lines at the end.
6049    """
6050   
6051    fid = open(filename)
6052    lines = fid.readlines()
6053   
6054    fid.close()
6055   
6056    header_line = lines[0]
6057    header_fields = header_line.split(separator_value)
6058
    #array to store data; resized below to (number of lines - 1) rows
    #by (number of fields) columns.  I'm sure there is a better way!
6061    data=array([],typecode=Float)
6062    data=resize(data,((len(lines)-1),len(header_fields)))
6063#    print 'number of fields',range(len(header_fields))
6064#    print 'number of lines',len(lines), shape(data)
6065#    print'data',data[1,1],header_line
6066
6067    array_number = 0
6068    line_number = 1
    while line_number < len(lines):
        #data lines start below the header, so line_number starts at 1
        fields = lines[line_number].split(separator_value)
        for i in range(len(header_fields)):
            #assign each field to the array
            data[array_number,i] = float(fields[i])

        line_number = line_number + 1
        array_number = array_number + 1
6079       
6080    return header_fields, data
6081
6082def store_parameters(verbose=False,**kwargs):
6083    """
6084    Store "kwargs" into a temp csv file, if "completed" is a kwargs csv file is
6085    kwargs[file_name] else it is kwargs[output_dir] + details_temp.csv
6086   
6087    Must have a file_name keyword arg, this is what is writing to.
6088    might be a better way to do this using CSV module Writer and writeDict
6089   
6090    writes file to "output_dir" unless "completed" is in kwargs, then
6091    it writes to "file_name" kwargs
6092
6093    """
6094    import types
6095#    import os
6096   
6097    # Check that kwargs is a dictionary
6098    if type(kwargs) != types.DictType:
6099        raise TypeError
6100   
    #is 'completed' in kwargs?
    completed = kwargs.has_key('completed')
6107 
    #get file name and assert that a file_name exists
    if completed:
        try:
            file = str(kwargs['file_name'])
        except:
            raise Exception, 'kwargs must have file_name'
    else:
        #write temp file in output directory
        try:
            file = str(kwargs['output_dir'])+'detail_temp.csv'
        except:
            raise Exception, 'kwargs must have output_dir'
6120       
    # Build the header and the new data line from the sorted keys
    keys = kwargs.keys()
    keys.sort()

    header = ','.join([str(k) for k in keys]) + '\n'
    line = ','.join([str(kwargs[k]) for k in keys]) + '\n'
6139
    # Check the header info: if it matches, append; if not, create a new file
    try:
        fid = open(file, 'r')
        file_header = fid.readline()
        fid.close()
        if verbose: print 'read file header %s' % file_header
    except IOError:
        msg = 'try to create new file %s' % file
        if verbose: print msg
        # Try to create the file; if this fails the directory is probably bad
        try:
            fid = open(file, 'w')
            fid.write(header)
            fid.close()
            file_header = header
        except IOError:
            msg = 'cannot create new file %s' % file
            raise IOError(msg)

    # If the header is the same, or this is a new file, append the line
    if file_header == header:
        fid = open(file, 'a')
        fid.write(line)
        fid.close()
    else:
        # Backup plan: if the header is different, append the header and
        # line to detail_temp.csv in the output directory
        file = str(kwargs['output_dir']) + 'detail_temp.csv'
        fid = open(file, 'a')
        fid.write(header)
        fid.write(line)
        fid.close()

        if verbose:
            print 'file header %s' % file_header.strip('\n')
            print 'new header  %s' % header.strip('\n')
        msg = 'WARNING: File header does not match input info; the input '
        msg += 'variables have changed. Consider using a new file name.'
        print msg

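# Hedged usage sketch for store_parameters (not part of the original module).
# The directory and parameter names are hypothetical.
def _example_store_parameters():
    """Write two data lines and then a completed record."""
    # First call creates output/detail_temp.csv with header 'max_stage,output_dir'
    store_parameters(output_dir='output/', max_stage=2.5)

    # Same keys, so this appends a second line to the same file
    store_parameters(output_dir='output/', max_stage=2.7)

    # With 'completed' present, the record goes to 'file_name' instead
    store_parameters(output_dir='output/', file_name='output/details.csv',
                     max_stage=2.7, completed=True)
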
#------------------------------------------------------------
# Functions to obtain diagnostics from sww files
#------------------------------------------------------------
6187
6188def get_mesh_and_quantities_from_file(filename,
6189                                      quantities=None,
6190                                      verbose=False):
    """Get and rebuild mesh structure and the associated quantities from sww file

    Input:
        filename - Name of sww file
        quantities - Names of quantities to load

    Output:
        mesh - instance of class Mesh
               (from anuga.abstract_2d_finite_volumes.neighbour_mesh)
        quantities - dictionary of arrays with quantity values at each mesh node
        time - vector of stored timesteps
    """
6203 
6204    # FIXME (Ole): Maybe refactor filefunction using this more fundamental code.
6205   
    # NetCDFFile and the Numeric functions used below are already imported
    # at module level; only Mesh is needed locally
    from anuga.abstract_2d_finite_volumes.neighbour_mesh import Mesh
6211
6212    if verbose: print 'Reading from ', filename
6213    fid = NetCDFFile(filename, 'r')    # Open existing file for read
6214    time = fid.variables['time'][:]    # Time vector
6215    time += fid.starttime[0]
6216   
6217    # Get the variables as Numeric arrays
6218    x = fid.variables['x'][:]                   # x-coordinates of nodes
6219    y = fid.variables['y'][:]                   # y-coordinates of nodes
6220    elevation = fid.variables['elevation'][:]   # Elevation
6221    stage = fid.variables['stage'][:]           # Water level
6222    xmomentum = fid.variables['xmomentum'][:]   # Momentum in the x-direction
6223    ymomentum = fid.variables['ymomentum'][:]   # Momentum in the y-direction
6224
6225
6226    # Mesh (nodes (Mx2), triangles (Nx3))
6227    nodes = concatenate( (x[:, NewAxis], y[:, NewAxis]), axis=1 )
6228    triangles = fid.variables['volumes'][:]
6229   
    # Get geo_reference
    try:
        geo_reference = Geo_reference(NetCDFObject=fid)
    except AttributeError:
        # Sww files don't have to have a geo_ref
        geo_reference = None
6236
6237    if verbose: print '    building mesh from sww file %s' %filename
6238    boundary = None
6239
6240    #FIXME (Peter Row): Should this be in mesh?
6241    if fid.smoothing != 'Yes':
6242        nodes = nodes.tolist()
6243        triangles = triangles.tolist()
        nodes, triangles, boundary = weed(nodes, triangles, boundary)
6245
6246
6247    try:
6248        mesh = Mesh(nodes, triangles, boundary,
6249                    geo_reference=geo_reference)
    except AssertionError, e:
        fid.close()
        msg = 'Mesh could not be created: %s' % e
        raise DataDomainError, msg
6254
6255
6256    quantities = {}
6257    quantities['elevation'] = elevation
6258    quantities['stage'] = stage   
6259    quantities['xmomentum'] = xmomentum
6260    quantities['ymomentum'] = ymomentum   
6261
6262    fid.close()
6263    return mesh, quantities, time
6264
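
# Hedged usage sketch for get_mesh_and_quantities_from_file (not part of the
# original module); the sww file name is hypothetical.
def _example_get_mesh_and_quantities_from_file():
    """Read mesh, quantities and time vector from a hypothetical sww file."""
    mesh, quantities, time = get_mesh_and_quantities_from_file('runup.sww')

    # 'stage' has one row per stored timestep, so the final water level at
    # every node is
    final_stage = quantities['stage'][-1, :]
    print 'Read %d timesteps covering [%.2f, %.2f]' % (len(time), time[0], time[-1])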
6265
6266
6267def get_flow_through_cross_section(filename,
6268                                   polyline,
6269                                   verbose=False):
6270    """Obtain flow (m^3/s) perpendicular to specified cross section.
6271
6272    Inputs:
6273        filename: Name of sww file
        polyline: Representation of desired cross section - it may contain
                  multiple sections allowing for complex shapes. Assumed to
                  be in absolute UTM coordinates.
                  Format [[x0, y0], [x1, y1], ...]
6277
6278    Output:
6279        time: All stored times in sww file
6280        Q: Hydrograph of total flow across given segments for all stored times.
6281
    The normal flow is computed for each triangle intersected by the polyline
    and added up.  If multiple segments are specified at different angles,
    the normal flows may partially cancel each other.
6285
6286    The typical usage of this function would be to get flow through a channel,
6287    and the polyline would then be a cross section perpendicular to the flow.
6288
6289    """
6290
6291    from anuga.fit_interpolate.interpolate import Interpolation_function
6292
6293    quantity_names =['elevation',
6294                     'stage',
6295                     'xmomentum',
6296                     'ymomentum']
6297
6298    # Get mesh and quantities from sww file
6299    X = get_mesh_and_quantities_from_file(filename,
6300                                          quantities=quantity_names,
6301                                          verbose=verbose)
6302    mesh, quantities, time = X
6303
6304
6305    # Adjust polyline to mesh spatial origin
6306    polyline = mesh.geo_reference.get_relative(polyline)
6307   
6308    # Find all intersections and associated triangles.
6309    segments = mesh.get_intersecting_segments(polyline, verbose=verbose)
6313
6314   
6315    # Then store for each triangle the length of the intersecting segment(s),
6316    # right hand normal(s) and midpoints as interpolation_points.
6317    interpolation_points = []
6318    for segment in segments:
6319        midpoint = sum(array(segment.segment))/2
6320        interpolation_points.append(midpoint)
6321
6322
6323    if verbose:
6324        print 'Interpolating - ',       
6325        print 'total number of interpolation points = %d'\
6326              %len(interpolation_points)
6327
6328    I = Interpolation_function(time,
6329                               quantities,
6330                               quantity_names=quantity_names,
6331                               vertex_coordinates=mesh.nodes,
6332                               triangles=mesh.triangles,
6333                               interpolation_points=interpolation_points,
6334                               verbose=verbose)
6335
6336    if verbose: print 'Computing hydrograph'
6337    # Compute hydrograph
6338    Q = []
6339    for t in time:
6340        total_flow=0
6341        for i, p in enumerate(interpolation_points):
6342            elevation, stage, uh, vh = I(t, point_id=i)
6343            normal = segments[i].normal
6344
6345            # Inner product of momentum vector with segment normal [m^2/s]
6346            normal_momentum = uh*normal[0] + vh*normal[1] 
6347
6348            # Flow across this segment [m^3/s]
6349            segment_flow = normal_momentum*segments[i].length
6350
6351            # Accumulate
6352            total_flow += segment_flow
6353             
6354
6355        # Store flow at this timestep   
6356        Q.append(total_flow)
6357   
6358
6359
6360    return time, Q
6361   
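
# Hedged usage sketch for get_flow_through_cross_section (not part of the
# original module); the file name and coordinates are hypothetical.
def _example_get_flow_through_cross_section():
    """Compute a hydrograph across a two-point cross section."""
    # Cross section as a polyline in absolute UTM coordinates
    polyline = [[308525.0, 6193300.0], [308560.0, 6193380.0]]

    time, Q = get_flow_through_cross_section('channel.sww', polyline)

    # Q holds the total flow [m^3/s] across the polyline, one value per
    # stored timestep
    print 'Peak flow was %.4f m^3/s' % max(Q)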
6362
6363
6364
6365
6366def get_maximum_inundation_elevation(filename,
6367                                     polygon=None,
6368                                     time_interval=None,
6369                                     verbose=False):
6370   
6371    """Return highest elevation where depth > 0
6372   
6373    Usage:
6374    max_runup = get_maximum_inundation_elevation(filename,
6375                                                 polygon=None,
6376                                                 time_interval=None,
6377                                                 verbose=False)
6378
6379    filename is a NetCDF sww file containing ANUGA model output.   
    Optional arguments polygon and time_interval restrict the maximum runup
    calculation to points that lie within the specified polygon and time
    interval.
6383
6384    If no inundation is found within polygon and time_interval the return value
6385    is None signifying "No Runup" or "Everything is dry".
6386
6387    See general function get_maximum_inundation_data for details.
6388   
6389    """
6390   
6391    runup, _ = get_maximum_inundation_data(filename,
6392                                           polygon=polygon,
6393                                           time_interval=time_interval,
6394                                           verbose=verbose)
6395    return runup
6396
6397
6398
6399
6400def get_maximum_inundation_location(filename,
6401                                    polygon=None,
6402                                    time_interval=None,
6403                                    verbose=False):
6404    """Return location of highest elevation where h > 0
6405   
6406   
6407    Usage:
6408    max_runup_location = get_maximum_inundation_location(filename,
6409                                                         polygon=None,
6410                                                         time_interval=None,
6411                                                         verbose=False)
6412
6413    filename is a NetCDF sww file containing ANUGA model output.
    Optional arguments polygon and time_interval restrict the maximum runup
    calculation to points that lie within the specified polygon and time
    interval.
6417
6418    If no inundation is found within polygon and time_interval the return value
6419    is None signifying "No Runup" or "Everything is dry".
6420
6421    See general function get_maximum_inundation_data for details.
6422    """
6423   
6424    _, max_loc = get_maximum_inundation_data(filename,
6425                                             polygon=polygon,
6426                                             time_interval=time_interval,
6427                                             verbose=verbose)
6428    return max_loc
6429   
6430
6431
6432def get_maximum_inundation_data(filename, polygon=None, time_interval=None,
6433                                use_centroid_values=False,
6434                                verbose=False):
6435    """Compute maximum run up height from sww file.
6436
6437
6438    Usage:
6439    runup, location = get_maximum_inundation_data(filename,
6440                                                  polygon=None,
6441                                                  time_interval=None,
6442                                                  verbose=False)
6443   
6444
    The algorithm is as in get_maximum_inundation_elevation from
    shallow_water_domain, except that this function works with the sww file
    and computes the maximal runup height over multiple timesteps.

    Optional arguments polygon and time_interval restrict the maximum runup
    calculation to points that lie within the specified polygon and time
    interval. The polygon is assumed to be in (absolute) UTM coordinates in
    the same zone as the domain.
6455
6456    If no inundation is found within polygon and time_interval the return value
6457    is None signifying "No Runup" or "Everything is dry".
6458    """
6459
6460    # We are using nodal values here as that is what is stored in sww files.
6461
6462    # Water depth below which it is considered to be 0 in the model
6463    # FIXME (Ole): Allow this to be specified as a keyword argument as well
6464
6465    from anuga.utilities.polygon import inside_polygon   
6466    from anuga.config import minimum_allowed_height
6467    from Scientific.IO.NetCDF import NetCDFFile
6468
    dir_name, base = os.path.split(filename)

    iterate_over = get_all_swwfiles(dir_name, base)

    maximal_runup = None
    maximal_runup_location = None

    for swwfile in iterate_over:
        # Read sww file
        filename = join(dir_name, swwfile+'.sww')

        if verbose:
            print 'Reading from %s' % filename
            # FIXME: Use general swwstats (when done)

6490        fid = NetCDFFile(filename)
6491   
6492        # Get geo_reference
6493        # sww files don't have to have a geo_ref
6494        try:
6495            geo_reference = Geo_reference(NetCDFObject=fid)
6496        except AttributeError, e:
6497            geo_reference = Geo_reference() # Default georef object
6498           
6499        xllcorner = geo_reference.get_xllcorner()
6500        yllcorner = geo_reference.get_yllcorner()
6501        zone = geo_reference.get_zone()
6502       
6503        # Get extent
6504        volumes = fid.variables['volumes'][:]   
6505        x = fid.variables['x'][:] + xllcorner
6506        y = fid.variables['y'][:] + yllcorner
6507   
6508   
        # Get the relevant quantities (convert from single precision)
6510        elevation = array(fid.variables['elevation'][:], Float) 
6511        stage = array(fid.variables['stage'][:], Float)
6512   
6513   
        # Here's where one could convert nodal information to centroid
        # information, but that is probably something we need to write in C.
        # The Python attempt below is NOT finished!
6518        if use_centroid_values is True:
6519            x = get_centroid_values(x, volumes)
6520            y = get_centroid_values(y, volumes)   
6521            elevation = get_centroid_values(elevation, volumes)   
6522   
6523   
6524        # Spatial restriction
6525        if polygon is not None:
6526            msg = 'polygon must be a sequence of points.'
6527            assert len(polygon[0]) == 2, msg
6528            # FIXME (Ole): Make a generic polygon input check in polygon.py
6529            # and call it here
6530           
6531            points = concatenate((x[:,NewAxis], y[:,NewAxis]), axis=1)
6532   
6533            point_indices = inside_polygon(points, polygon)
6534   
6535            # Restrict quantities to polygon
6536            elevation = take(elevation, point_indices)
6537            stage = take(stage, point_indices, axis=1)
6538   
6539            # Get info for location of maximal runup
6540            points_in_polygon = take(points, point_indices)
6541            x = points_in_polygon[:,0]
6542            y = points_in_polygon[:,1]       
6543        else:
6544            # Take all points
6545            point_indices = arange(len(x))
6546           
6547   
6548        # Temporal restriction
6549        time = fid.variables['time'][:]
6550        all_timeindices = arange(len(time))       
6551        if time_interval is not None:
6552           
6553            msg = 'time_interval must be a sequence of length 2.'
6554            assert len(time_interval) == 2, msg
6555            msg = 'time_interval %s must not be decreasing.' %(time_interval)
6556            assert time_interval[1] >= time_interval[0], msg
6557           
            msg = 'Specified time interval [%.8f:%.8f]' % tuple(time_interval)
            msg += ' does not overlap model time interval: [%.8f, %.8f]\n'\
                   % (time[0], time[-1])
6561            if time_interval[1] < time[0]: raise ValueError(msg)
6562            if time_interval[0] > time[-1]: raise ValueError(msg)
6563   
6564            # Take time indices corresponding to interval (& is bitwise AND)
6565            timesteps = compress((time_interval[0] <= time) & (time <= time_interval[1]),
6566                                 all_timeindices)
6567   
6568   
            msg = 'time_interval %s did not include any model timesteps.' % (time_interval)
            assert len(timesteps) > 0, msg
6571   
6572   
6573        else:
6574            # Take them all
6575            timesteps = all_timeindices
6576       
6577   
6578        fid.close()
6579   
        # Compute maximal runup for each timestep
6585       
6586        for i in timesteps:
6587            if use_centroid_values is True:
6588                stage_i = get_centroid_values(stage[i,:], volumes)   
6589            else:
6590                stage_i = stage[i,:]
6591               
6592            depth = stage_i  - elevation
6593       
6594            # Get wet nodes i.e. nodes with depth>0 within given region and timesteps
6595            wet_nodes = compress(depth > minimum_allowed_height, arange(len(depth)))
6596   
            if len(wet_nodes) == 0:
                # Nothing is wet at this timestep
                runup = None
6599            else:   
6600                # Find maximum elevation among wet nodes
6601                wet_elevation = take(elevation, wet_nodes)
6602   
6603                runup_index = argmax(wet_elevation)
6604                runup = max(wet_elevation)
6605                assert wet_elevation[runup_index] == runup # Must be True
6608           
            if runup > maximal_runup:
                maximal_runup = runup    # This works even if maximal_runup is None
6612   
6613                # Record location
6614                wet_x = take(x, wet_nodes)
6615                wet_y = take(y, wet_nodes)           
6616                maximal_runup_location = [wet_x[runup_index], wet_y[runup_index]]
6617   
6619    return maximal_runup, maximal_runup_location
6620
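# Hedged usage sketch for get_maximum_inundation_data (not part of the
# original module); the file name, polygon and interval are hypothetical.
def _example_get_maximum_inundation_data():
    """Find the maximal runup within a polygon and time interval."""
    polygon = [[0.0, 0.0], [100.0, 0.0], [100.0, 100.0], [0.0, 100.0]]

    runup, location = get_maximum_inundation_data('runup.sww',
                                                  polygon=polygon,
                                                  time_interval=[0.0, 60.0])
    if runup is None:
        print 'Everything within the polygon stayed dry'
    else:
        print 'Maximal runup of %.2f m at %s' % (runup, location)
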
def get_all_swwfiles(look_in_dir='', base_name='', verbose=False):
    '''
    Finds all the sww files in "look_in_dir" whose names contain "base_name".
    Will accept base_name with or without the extension ".sww".
6625   
6626    Returns: a list of strings
6627       
6628    Usage:     iterate_over = get_all_swwfiles(dir, name)
6629    then
6630               for swwfile in iterate_over:
6631                   do stuff
6632                   
6633    Check "export_grids" and "get_maximum_inundation_data" for examples
6634    '''
6635   
    # Check the extension, if one was given
    name, extension = os.path.splitext(base_name)

    if extension != '' and extension != '.sww':
        msg = 'file %s %s must be a NetCDF sww file!' % (base_name, extension)
        raise IOError, msg
6642
6643    if look_in_dir == "":
6644        look_in_dir = "." # Unix compatibility
6645   
6646    dir_ls = os.listdir(look_in_dir)
6648    iterate_over = [x[:-4] for x in dir_ls if name in x and x[-4:] == '.sww']
6649    if len(iterate_over) == 0:
        msg = 'No files of the base name %s in %s' % (name, look_in_dir)
6652        raise IOError, msg
6653    if verbose: print 'iterate over %s' %(iterate_over)
6654
6655    return iterate_over
6656
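# Hedged usage sketch for get_all_swwfiles (not part of the original module);
# the directory and base name are hypothetical.
def _example_get_all_swwfiles():
    """List sww files such as 'runup_P0.sww' found in directory 'output'."""
    for swwfile in get_all_swwfiles(look_in_dir='output', base_name='runup'):
        print 'Found %s.sww' % swwfile
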
def get_all_files_with_extension(look_in_dir='',
                                 base_name='',
                                 extension='.sww',
                                 verbose=False):
    '''
    Finds all the files with the given extension in "look_in_dir" whose
    names contain "base_name".

    Returns: a list of strings (file names with the extension stripped)

    Usage:     iterate_over = get_all_files_with_extension(dir, name, '.sww')
    then
               for filename in iterate_over:
                   do stuff

    Check "export_grids" and "get_maximum_inundation_data" for examples
    '''
6671   
    # Check the extension, if one was given
    name, ext = os.path.splitext(base_name)

    if ext != '' and ext != extension:
        msg = 'base_name %s must be a file with %s extension!' % (base_name, extension)
        raise IOError, msg

    if look_in_dir == "":
        look_in_dir = "." # Unix compatibility

    dir_ls = os.listdir(look_in_dir)
    # Use len(extension) rather than a hardwired 4 so that extensions of
    # any length work
    iterate_over = [x[:-len(extension)] for x in dir_ls
                    if name in x and x[-len(extension):] == extension]
6686    if len(iterate_over) == 0:
6687        msg = 'No files of the base name %s in %s'\
6688              %(name, look_in_dir)
6689        raise IOError, msg
6690    if verbose: print 'iterate over %s' %(iterate_over)
6691
6692    return iterate_over
6693
def get_all_directories_with_name(look_in_dir='', base_name='', verbose=False):
    '''
    Finds all the directories in "look_in_dir" whose names contain
    "base_name".

    Returns: a list of strings

    Usage:     iterate_over = get_all_directories_with_name(dir, name)
    then
               for dir_name in iterate_over:
                   do stuff

    Check "export_grids" and "get_maximum_inundation_data" for examples
    '''
6708   
    if look_in_dir == "":
        look_in_dir = "." # Unix compatibility

    dir_ls = os.listdir(look_in_dir)
    iterate_over = [x for x in dir_ls if base_name in x]
    if len(iterate_over) == 0:
        msg = 'No directories with the base name %s' % (base_name)
        raise IOError, msg
6721    if verbose: print 'iterate over %s' %(iterate_over)
6722
6723    return iterate_over
6724
def points2polygon(points_file,
                   minimum_triangle_angle=3.0):
    """
    WARNING: This function is not fully working.

    Return a polygon derived from the alpha shape of the points in
    points_file.

    WARNING: The alpha shape may consist of multiple polygons, but this
    function only returns one of them.
    """
    from anuga.pmesh.mesh import importMeshFromFile

    # Segment the points via the alpha shape and write the outline
    mesh = importMeshFromFile(points_file)
    mesh.auto_segment()
    mesh.exportASCIIsegmentoutlinefile("outline.tsh")

    # Mesh the outline and extract its boundary polygon via a Domain
    mesh2 = importMeshFromFile("outline.tsh")
    mesh2.generate_mesh(maximum_triangle_area=1000000000,
                        minimum_triangle_angle=minimum_triangle_angle,
                        verbose=False)
    mesh2.export_mesh_file('outline_meshed.tsh')

    domain = Domain("outline_meshed.tsh", use_cache=False)
    polygon = domain.get_boundary_polygon()
    return polygon
6748
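# Hedged usage sketch for points2polygon (not part of the original module);
# the points file name is hypothetical. Note that intermediate mesh files
# are written to the current directory.
def _example_points2polygon():
    """Derive a boundary polygon from a hypothetical points file."""
    polygon = points2polygon('coastline.pts')
    print 'Boundary polygon has %d vertices' % len(polygon)
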
6749#-------------------------------------------------------------
6750if __name__ == "__main__":
    # Set umask from config to force the same permissions for all created
    # files and directories. (It was noticed that "mpirun" doesn't honour
    # the umask set in your .bashrc etc.)
6754    from config import umask
6755    import os 
6756    os.umask(umask)