source: anuga_core/source/anuga/shallow_water/data_manager.py @ 4668

Last change on this file since 4668 was 4665, checked in by duncan, 18 years ago

working on ticket#176

1"""datamanager.py - input output for AnuGA
2
3
4This module takes care of reading and writing datafiles such as topograhies,
5model output, etc
6
7
8Formats used within AnuGA:
9
10.sww: Netcdf format for storing model output f(t,x,y)
11.tms: Netcdf format for storing time series f(t)
12
13.csv: ASCII format for storing arbitrary points and associated attributes
14.pts: NetCDF format for storing arbitrary points and associated attributes
15
16.asc: ASCII format of regular DEMs as output from ArcView
17.prj: Associated ArcView file giving more meta data for asc format
18.ers: ERMapper header format of regular DEMs for ArcView
19
20.dem: NetCDF representation of regular DEM data
21
22.tsh: ASCII format for storing meshes and associated boundary and region info
23.msh: NetCDF format for storing meshes and associated boundary and region info
24
25.nc: Native ferret NetCDF format
26.geo: Houdinis ascii geometry format (?)
27
28
29A typical dataflow can be described as follows
30
31Manually created files:
32ASC, PRJ:     Digital elevation models (gridded)
33TSH:          Triangular meshes (e.g. created from anuga.pmesh)
34NC            Model outputs for use as boundary conditions (e.g from MOST)
35
36
37AUTOMATICALLY CREATED FILES:
38
39ASC, PRJ  ->  DEM  ->  PTS: Conversion of DEM's to native pts file
40
41NC -> SWW: Conversion of MOST bundary files to boundary sww
42
43PTS + TSH -> TSH with elevation: Least squares fit
44
45TSH -> SWW:  Conversion of TSH to sww viewable using Swollen
46
47TSH + Boundary SWW -> SWW: Simluation using abstract_2d_finite_volumes
48
49"""

import exceptions
class TitleValueError(exceptions.Exception): pass
class DataMissingValuesError(exceptions.Exception): pass
class DataFileNotOpenError(exceptions.Exception): pass
class DataTimeError(exceptions.Exception): pass
class DataDomainError(exceptions.Exception): pass
class NewQuantity(exceptions.Exception): pass


import csv
import os, sys
import shutil
from struct import unpack
import array as p_array
#import time, os
from os import sep, path, remove, mkdir, access, F_OK, W_OK, getcwd


from Numeric import concatenate, array, Float, Int, Int32, resize, sometrue, \
     searchsorted, zeros, allclose, around, reshape, transpose, sort, \
     NewAxis, ArrayType, compress, take, arange, argmax, alltrue, shape

import string

from Scientific.IO.NetCDF import NetCDFFile
#from shutil import copy
from os.path import exists, basename, join
from os import getcwd


from anuga.coordinate_transforms.redfearn import redfearn, \
     convert_from_latlon_to_utm
from anuga.coordinate_transforms.geo_reference import Geo_reference, \
     write_NetCDF_georeference, ensure_geo_reference
from anuga.geospatial_data.geospatial_data import Geospatial_data, \
     ensure_absolute
from anuga.config import minimum_storable_height as default_minimum_storable_height
from anuga.utilities.numerical_tools import ensure_numeric, mean
from anuga.caching.caching import myhash
from anuga.utilities.anuga_exceptions import ANUGAError
from anuga.shallow_water import Domain
from anuga.abstract_2d_finite_volumes.pmesh2domain import \
     pmesh_to_domain_instance
from anuga.abstract_2d_finite_volumes.util import get_revision_number, \
     remove_lone_verts, sww2timeseries, get_centroid_values
from anuga.load_mesh.loadASCII import export_mesh_file

# formula mappings
quantity_formula = {'momentum':'(xmomentum**2 + ymomentum**2)**0.5',
                    'depth':'stage-elevation',
                    'speed': \
 '(xmomentum**2 + ymomentum**2)**0.5/(stage-elevation+1.e-6/(stage-elevation))'}


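# Editor's sketch: the strings in quantity_formula are intended to be
# evaluated with the named quantities bound to Numeric arrays.  A
# minimal, self-contained illustration (the values are hypothetical):
def _example_quantity_formula():
    stage = array([1.0, 2.0])
    elevation = array([0.5, 0.5])
    depth = eval(quantity_formula['depth'])  # i.e. stage - elevation
    return depth
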
def make_filename(s):
    """Transform argument string into a suitable filename
    """

    s = s.strip()
    s = s.replace(' ', '_')
    s = s.replace('(', '')
    s = s.replace(')', '')
    s = s.replace('__', '_')

    return s


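# Editor's sketch: a quick self-check of make_filename.
def _example_make_filename():
    assert make_filename(' Model output (test) ') == 'Model_output_test'

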
def check_dir(path, verbose=None):
    """Check that specified path exists.
    If path does not exist it will be created if possible

    USAGE:
       check_dir(path, verbose)

    ARGUMENTS:
        path -- Directory
        verbose -- Flag verbose output (default: None)

    RETURN VALUE:
        Verified path including trailing separator

    """

    import os.path

    if sys.platform in ['nt', 'dos', 'win32', 'what else?']:
        unix = 0
    else:
        unix = 1


    if path[-1] != os.sep:
        path = path + os.sep  # Add separator for directories

    path = os.path.expanduser(path)  # Expand ~ or ~user in pathname
    if not (os.access(path, os.R_OK | os.W_OK) or path == ''):
        try:
            exitcode = os.mkdir(path)

            # Change access rights if possible
            #
            if unix:
                exitcode = os.system('chmod 775 ' + path)
            else:
                pass  # FIXME: What about access rights under Windows?

            if verbose: print 'MESSAGE: Directory', path, 'created.'

        except:
            print 'WARNING: Directory', path, 'could not be created.'
            if unix:
                path = '/tmp/'
            else:
                path = 'C:'

            print 'Using directory %s instead' % path

    return(path)

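# Editor's sketch of typical check_dir use (directory name hypothetical):
def _example_check_dir():
    datadir = check_dir('output')  # Created if absent; trailing sep added
    print 'Using data directory', datadir
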


def del_dir(path):
    """Recursively delete directory path and all its contents
    """

    import os

    if os.path.isdir(path):
        for file in os.listdir(path):
            X = os.path.join(path, file)


            if os.path.isdir(X) and not os.path.islink(X):
                del_dir(X)
            else:
                try:
                    os.remove(X)
                except:
                    print "Could not remove file %s" % X

        os.rmdir(path)


# ANOTHER OPTION, IF NEEDED IN THE FUTURE, Nick B 7/2007
def rmgeneric(path, __func__, verbose=False):
    ERROR_STR = """Error removing %(path)s, %(error)s """

    try:
        __func__(path)
        if verbose: print 'Removed ', path
    except OSError, (errno, strerror):
        print ERROR_STR % {'path' : path, 'error': strerror}

def removeall(path, verbose=False):

    if not os.path.isdir(path):
        return

    files = os.listdir(path)

    for x in files:
        fullpath = os.path.join(path, x)
        if os.path.isfile(fullpath):
            f = os.remove
            rmgeneric(fullpath, f)
        elif os.path.isdir(fullpath):
            removeall(fullpath, verbose)
            f = os.rmdir
            rmgeneric(fullpath, f, verbose)



def create_filename(datadir, filename, format, size=None, time=None):

    import os
    #from anuga.config import data_dir

    FN = check_dir(datadir) + filename

    if size is not None:
        FN += '_size%d' % size

    if time is not None:
        FN += '_time%.2f' % time

    FN += '.' + format
    return FN

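# Editor's note (hypothetical arguments): create_filename('.', 'runup',
# 'sww', size=100, time=2.0) returns './runup_size100_time2.00.sww'.
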

def get_files(datadir, filename, format, size):
    """Get all file (names) with given name, size and format
    """

    import glob

    import os
    #from anuga.config import data_dir

    dir = check_dir(datadir)

    # Note: check_dir already appends os.sep, and create_filename uses
    # '_size%d' (no '='), so match that pattern here.
    pattern = dir + filename + '_size%d*.%s' % (size, format)
    return glob.glob(pattern)



#Generic class for storing output to e.g. visualisation or checkpointing
class Data_format:
    """Generic interface to data formats
    """


    def __init__(self, domain, extension, mode = 'w'):
        assert mode in ['r', 'w', 'a'], '''Mode %s must be either:''' % mode +\
                                        '''   'w' (write)''' +\
                                        '''   'r' (read)''' +\
                                        '''   'a' (append)'''

        #Create filename
        self.filename = create_filename(domain.get_datadir(),
                                        domain.get_name(), extension)

        #print 'F', self.filename
        self.timestep = 0
        self.domain = domain



        # Exclude ghosts in case this is a parallel domain
        self.number_of_nodes = domain.number_of_full_nodes
        self.number_of_volumes = domain.number_of_full_triangles
        #self.number_of_volumes = len(domain)




        #FIXME: Should we have a general set_precision function?


#Class for storing output to e.g. visualisation
class Data_format_sww(Data_format):
    """Interface to native NetCDF format (.sww) for storing model output

    There are two kinds of data

    1: Constant data: Vertex coordinates and field values. Stored once
    2: Variable data: Conserved quantities. Stored once per timestep.

    All data is assumed to reside at vertex locations.
    """


    def __init__(self, domain, mode = 'w',
                 max_size = 2000000000,
                 recursion = False):
        from Scientific.IO.NetCDF import NetCDFFile
        from Numeric import Int, Float, Float32

        self.precision = Float32  #Use single precision for quantities
        if hasattr(domain, 'max_size'):
            self.max_size = domain.max_size  #File size max is 2Gig
        else:
            self.max_size = max_size
        self.recursion = recursion
        self.mode = mode

        Data_format.__init__(self, domain, 'sww', mode)

        if hasattr(domain, 'minimum_storable_height'):
            self.minimum_storable_height = domain.minimum_storable_height
        else:
            self.minimum_storable_height = default_minimum_storable_height

        # NetCDF file definition
        fid = NetCDFFile(self.filename, mode)

        if mode == 'w':
            description = 'Output from anuga.abstract_2d_finite_volumes suitable for plotting'
            self.writer = Write_sww()
            self.writer.header(fid, domain.starttime,
                               self.number_of_volumes,
                               self.domain.number_of_full_nodes,
                               description=description,
                               smoothing=domain.smooth,
                               order=domain.default_order)
            if hasattr(domain, 'texture'):
                fid.texture = domain.texture
    #        if domain.geo_reference is not None:
    #            domain.geo_reference.write_NetCDF(fid)

#             fid.institution = 'Geoscience Australia'
#             fid.description = 'Output from anuga.abstract_2d_finite_volumes suitable for plotting'

#             if domain.smooth:
#                 fid.smoothing = 'Yes'
#             else:
#                 fid.smoothing = 'No'

#             fid.order = domain.default_order

#             #Reference point
#             #Start time in seconds since the epoch (midnight 1/1/1970)
#             #FIXME: Use Georef
#             fid.starttime = domain.starttime

#             # dimension definitions
#             fid.createDimension('number_of_volumes', self.number_of_volumes)
#             fid.createDimension('number_of_vertices', 3)

#             if domain.smooth is True:
#                 #fid.createDimension('number_of_points', len(domain.vertexlist))
#                 fid.createDimension('number_of_points', self.domain.number_of_full_nodes)

#                 # FIXME(Ole): This will cause sww files for parallel domains to
#                 # have ghost nodes stored (but not used by triangles).
#                 # To clean this up, we have to change get_vertex_values and friends in
#                 # quantity.py (but I can't be bothered right now)
#             else:
#                 fid.createDimension('number_of_points', 3*self.number_of_volumes)

#             fid.createDimension('number_of_timesteps', None) #extensible

#             # variable definitions
#             fid.createVariable('x', self.precision, ('number_of_points',))
#             fid.createVariable('y', self.precision, ('number_of_points',))
#             fid.createVariable('elevation', self.precision, ('number_of_points',))
#             if domain.geo_reference is not None:
#                 domain.geo_reference.write_NetCDF(fid)

#             #FIXME: Backwards compatibility
#             fid.createVariable('z', self.precision, ('number_of_points',))
#             #################################

#             fid.createVariable('volumes', Int, ('number_of_volumes',
#                                                 'number_of_vertices'))

#             fid.createVariable('time', Float,  # Always use full precision lest two timesteps
#                                                # close to each other may appear as the same step
#                                ('number_of_timesteps',))

#             fid.createVariable('stage', self.precision,
#                                ('number_of_timesteps',
#                                 'number_of_points'))

#             fid.createVariable('xmomentum', self.precision,
#                                ('number_of_timesteps',
#                                 'number_of_points'))

#             fid.createVariable('ymomentum', self.precision,
#                                ('number_of_timesteps',
#                                 'number_of_points'))

        #Close
        fid.close()


    def store_connectivity(self):
        """Specialisation of store_connectivity for NetCDF format

        Writes x,y,z coordinates of triangles constituting
        the bed elevation.
        """

        from Scientific.IO.NetCDF import NetCDFFile

        from Numeric import concatenate, Int

        domain = self.domain

        #Get NetCDF
        fid = NetCDFFile(self.filename, 'a')  #Open existing file for append

        # Get the variables
        x = fid.variables['x']
        y = fid.variables['y']
        z = fid.variables['elevation']

        volumes = fid.variables['volumes']

        # Get X, Y and bed elevation Z
        Q = domain.quantities['elevation']
        X,Y,Z,V = Q.get_vertex_values(xy=True,
                                      precision=self.precision)

        points = concatenate( (X[:,NewAxis],Y[:,NewAxis]), axis=1 )
        self.writer.triangulation(fid, points,
                                  V.astype(volumes.typecode()),
                                  Z,
                                  points_georeference= \
                                  domain.geo_reference)

#         volumes[:] = V.astype(volumes.typecode())
#         x[:] = X
#         y[:] = Y
#         z[:] = Z

#         #FIXME: Backwards compatibility
#         z = fid.variables['z']
#         z[:] = Z
        ################################



        #Close
        fid.close()

    def store_timestep(self, names):
        """Store time and named quantities to file
        """
        from Scientific.IO.NetCDF import NetCDFFile
        import types
        from time import sleep
        from os import stat

        from Numeric import choose

        #Get NetCDF
        retries = 0
        file_open = False
        while not file_open and retries < 10:
            try:
                fid = NetCDFFile(self.filename, 'a')  #Open existing file
            except IOError:
                #This could happen if someone was reading the file.
                #In that case, wait a while and try again
                msg = 'Warning (store_timestep): File %s could not be opened'\
                      % self.filename
                msg += ' - trying step %s again' % self.domain.time
                print msg
                retries += 1
                sleep(1)
            else:
                file_open = True

        if not file_open:
            msg = 'File %s could not be opened for append' % self.filename
            raise DataFileNotOpenError, msg



        #Check to see if the file is already too big:
        time = fid.variables['time']
        i = len(time) + 1
        file_size = stat(self.filename)[6]
        file_size_increase = file_size/i
        if file_size + file_size_increase > self.max_size*(2**self.recursion):
            #In order to get the file name and start time correct,
            #I change the domain.filename and domain.starttime.
            #This is the only way to do this without changing
            #other modules (I think).

            #Write a filename addon that won't break Swollen's reader
            #(10.sww is bad)
            filename_ext = '_time_%s' % self.domain.time
            filename_ext = filename_ext.replace('.', '_')
            #Remember the old filename, then give domain a
            #name with the extension
            old_domain_filename = self.domain.get_name()
            if not self.recursion:
                self.domain.set_name(old_domain_filename + filename_ext)


            #Change the domain starttime to the current time
            old_domain_starttime = self.domain.starttime
            self.domain.starttime = self.domain.time

            #Build a new data_structure.
            next_data_structure = \
                Data_format_sww(self.domain, mode=self.mode,
                                max_size = self.max_size,
                                recursion = self.recursion+1)
            if not self.recursion:
                print '    file_size = %s' % file_size
                print '    saving file to %s' % next_data_structure.filename
            #Set up the new data_structure
            self.domain.writer = next_data_structure

            #FIXME - could be cleaner to use domain.store_timestep etc.
            next_data_structure.store_connectivity()
            next_data_structure.store_timestep(names)
            fid.sync()
            fid.close()

            #Restore the old starttime and filename
            self.domain.starttime = old_domain_starttime
            self.domain.set_name(old_domain_filename)
        else:
            self.recursion = False
            domain = self.domain

            # Get the variables
            time = fid.variables['time']
            stage = fid.variables['stage']
            xmomentum = fid.variables['xmomentum']
            ymomentum = fid.variables['ymomentum']
            i = len(time)
            if type(names) not in [types.ListType, types.TupleType]:
                names = [names]

            if 'stage' in names and 'xmomentum' in names and \
                   'ymomentum' in names:

                # Get stage
                Q = domain.quantities['stage']
                A,_ = Q.get_vertex_values(xy = False,
                                          precision = self.precision)
                z = fid.variables['elevation']
                stage = choose(A-z[:] >= self.minimum_storable_height,
                               (z[:], A))

                # Get xmomentum
                Q = domain.quantities['xmomentum']
                xmomentum, _ = Q.get_vertex_values(xy = False,
                                                   precision = self.precision)

                # Get ymomentum
                Q = domain.quantities['ymomentum']
                ymomentum, _ = Q.get_vertex_values(xy = False,
                                                   precision = self.precision)
                self.writer.quantities(fid,
                                       time=self.domain.time,
                                       precision=self.precision,
                                       stage=stage,
                                       xmomentum=xmomentum,
                                       ymomentum=ymomentum)
            else:
                # This is producing an sww that is not standard.
                #Store time
                time[i] = self.domain.time

                for name in names:
                    # Get quantity
                    Q = domain.quantities[name]
                    A,V = Q.get_vertex_values(xy = False,
                                              precision = self.precision)

                    #FIXME: Make this general (see below)
                    if name == 'stage':
                        z = fid.variables['elevation']
                        A = choose(A-z[:] >= self.minimum_storable_height,
                                   (z[:], A))
                        stage[i,:] = A.astype(self.precision)
                    elif name == 'xmomentum':
                        xmomentum[i,:] = A.astype(self.precision)
                    elif name == 'ymomentum':
                        ymomentum[i,:] = A.astype(self.precision)

                    #As in....
                    #eval( name + '[i,:] = A.astype(self.precision)' )
                    #FIXME: But we need a UNIT test for that before
                    #refactoring



            #Flush and close
            fid.sync()
            fid.close()


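# Editor's sketch (not part of the original API): the storage cycle that
# Domain drives through this class, assuming 'domain' is a configured
# shallow-water Domain instance.
def _example_store_sww(domain):
    sww = Data_format_sww(domain, mode='w')
    sww.store_connectivity()              # Static mesh data, stored once
    for t in domain.evolve(yieldstep=1, finaltime=10):
        sww.store_timestep(['stage', 'xmomentum', 'ymomentum'])

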
#Class for handling checkpoints data
class Data_format_cpt(Data_format):
    """Interface to native NetCDF format (.cpt)
    """


    def __init__(self, domain, mode = 'w'):
        from Scientific.IO.NetCDF import NetCDFFile
        from Numeric import Int, Float

        self.precision = Float  #Use full precision

        Data_format.__init__(self, domain, 'cpt', mode)


        # NetCDF file definition
        fid = NetCDFFile(self.filename, mode)

        if mode == 'w':
            #Create new file
            fid.institution = 'Geoscience Australia'
            fid.description = 'Checkpoint data'
            #fid.smooth = domain.smooth
            fid.order = domain.default_order

            # dimension definitions
            fid.createDimension('number_of_volumes', self.number_of_volumes)
            fid.createDimension('number_of_vertices', 3)

            #Store info at all vertices (no smoothing)
            fid.createDimension('number_of_points', 3*self.number_of_volumes)
            fid.createDimension('number_of_timesteps', None) #extensible

            # variable definitions

            #Mesh
            fid.createVariable('x', self.precision, ('number_of_points',))
            fid.createVariable('y', self.precision, ('number_of_points',))


            fid.createVariable('volumes', Int, ('number_of_volumes',
                                                'number_of_vertices'))

            fid.createVariable('time', self.precision,
                               ('number_of_timesteps',))

            #Allocate space for all quantities
            for name in domain.quantities.keys():
                fid.createVariable(name, self.precision,
                                   ('number_of_timesteps',
                                    'number_of_points'))

        #Close
        fid.close()


    def store_checkpoint(self):
        """
        Write x,y coordinates and connectivity (volumes) of triangles
        to the checkpoint file.
        """

        from Scientific.IO.NetCDF import NetCDFFile

        from Numeric import concatenate

        domain = self.domain

        #Get NetCDF
        fid = NetCDFFile(self.filename, 'a')  #Open existing file for append

        # Get the variables
        x = fid.variables['x']
        y = fid.variables['y']

        volumes = fid.variables['volumes']

        # Get X, Y and bed elevation Z
        Q = domain.quantities['elevation']
        X,Y,Z,V = Q.get_vertex_values(xy=True,
                                      precision = self.precision)



        x[:] = X.astype(self.precision)
        y[:] = Y.astype(self.precision)
        #FIXME: no elevation variable of this shape is defined in the
        #file created by __init__, so Z is currently not stored here.
        #z[:] = Z.astype(self.precision)

        volumes[:] = V

        #Close
        fid.close()


    def store_timestep(self, name):
        """Store time and named quantity to file
        """
        from Scientific.IO.NetCDF import NetCDFFile
        from time import sleep

        #Get NetCDF
        retries = 0
        file_open = False
        while not file_open and retries < 10:
            try:
                fid = NetCDFFile(self.filename, 'a')  #Open existing file
            except IOError:
                #This could happen if someone was reading the file.
                #In that case, wait a while and try again
                msg = 'Warning (store_timestep): File %s could not be opened'\
                      % self.filename
                msg += ' - trying again'
                print msg
                retries += 1
                sleep(1)
            else:
                file_open = True

        if not file_open:
            msg = 'File %s could not be opened for append' % self.filename
            raise DataFileNotOpenError, msg


        domain = self.domain

        # Get the variables
        time = fid.variables['time']
        q_var = fid.variables[name]  #Variable for the named quantity
        i = len(time)

        #Store time
        time[i] = self.domain.time

        # Get quantity
        Q = domain.quantities[name]
        A,V = Q.get_vertex_values(xy=False,
                                  precision = self.precision)

        q_var[i,:] = A.astype(self.precision)

        #Flush and close
        fid.sync()
        fid.close()


#### NED is national exposure database (name changed to NEXIS)

LAT_TITLE = 'LATITUDE'
LONG_TITLE = 'LONGITUDE'
X_TITLE = 'x'
Y_TITLE = 'y'
class Exposure_csv:
    def __init__(self, file_name, latitude_title=LAT_TITLE,
                 longitude_title=LONG_TITLE, is_x_y_locations=None,
                 x_title=X_TITLE, y_title=Y_TITLE,
                 refine_polygon=None, title_check_list=None):
        """
        This class is for handling the exposure csv file.
        It reads the file in and converts the lats and longs to a geospatial
        data object.
        Use the methods to read and write columns.

        The format of the csv files it reads is:
           The first row is a title row.
           Commas are the delimiters.
           Each column is a 'set' of data.

        Feel free to use/expand it to read other csv files.

        It is not for adding and deleting rows.

        Can geospatial handle string attributes? It's not made for them.
        Currently it can't load and save string attributes.

        So just use geospatial to hold the x, y and georef? Bad, since
        different attributes are in different structures.  Not so bad, the
        info to write if the .csv file is saved is in attribute_dic.

        The location info is in the geospatial attribute.
        """
        self._file_name = file_name
        self._geospatial = None

        # self._attribute_dic is a dictionary.
        #The keys are the column titles.
        #The values are lists of column data

        # self._title_index_dic is a dictionary.
        #The keys are the column titles.
        #The values are the index positions of file columns.
        self._attribute_dic, self._title_index_dic = \
            csv2dict(self._file_name, title_check_list=title_check_list)
        try:
            #Have code here that handles caps or lower
            lats = self._attribute_dic[latitude_title]
            longs = self._attribute_dic[longitude_title]

        except KeyError:
            # maybe a warning..
            #Let's see if this works..
            if False != is_x_y_locations:
                is_x_y_locations = True
            pass
        else:
            self._geospatial = Geospatial_data(latitudes = lats,
                                               longitudes = longs)

        if is_x_y_locations is True:
            if self._geospatial is not None:
                pass  #FIXME: throw an error
            try:
                xs = self._attribute_dic[x_title]
                ys = self._attribute_dic[y_title]
                points = [[float(i),float(j)] for i,j in map(None,xs,ys)]
            except KeyError:
                # maybe a warning..
                msg = "Could not find location information."
                raise TitleValueError, msg
            else:
                self._geospatial = Geospatial_data(data_points=points)

        # create a list of points that are in the refining_polygon
        # described by a list of indexes representing the points

    def __cmp__(self, other):
        #print "self._attribute_dic",self._attribute_dic
        #print "other._attribute_dic",other._attribute_dic
        #print "self._title_index_dic", self._title_index_dic
        #print "other._title_index_dic", other._title_index_dic

        #Check that other is an instance of this class
        if isinstance(self, type(other)):
            result = cmp(self._attribute_dic, other._attribute_dic)
            if result != 0:
                return result
            # The order of the columns is important. Therefore..
            result = cmp(self._title_index_dic, other._title_index_dic)
            if result != 0:
                return result
            for self_ls, other_ls in map(None, self._attribute_dic,
                                         other._attribute_dic):
                result = cmp(self._attribute_dic[self_ls],
                             other._attribute_dic[other_ls])
                if result != 0:
                    return result
            return 0
        else:
            return 1


    def get_column(self, column_name, use_refind_polygon=False):
        """
        Given a column name return a list of the column values.

        Note, the type of the values will be String!
        Do this to change a list of strings to a list of floats:
        time = [float(x) for x in time]

        Not implemented:
        if use_refind_polygon is True, only return values in the
        refined polygon
        """
        if not self._attribute_dic.has_key(column_name):
            msg = 'There is no column called %s!' % column_name
            raise TitleValueError, msg
        return self._attribute_dic[column_name]


    def get_value(self, value_column_name,
                  known_column_name,
                  known_values,
                  use_refind_polygon=False):
        """
        Do linear interpolation on the known_column_name column, using
        the known_values, to return a value of the value_column_name
        column.
        """
        pass


    def get_location(self, use_refind_polygon=False):
        """
        Return a geospatial object which describes the
        locations of the location file.

        Note, if there is no location info, this returns None.

        Not implemented:
        if use_refind_polygon is True, only return values in the
        refined polygon
        """
        return self._geospatial

    def set_column(self, column_name, column_values, overwrite=False):
        """
        Add a column to the 'end' (with the right most column being the end)
        of the csv file.

        Set overwrite to True if you want to overwrite a column.

        Note, in column_name white space is removed and case is not checked.
        Precondition:
        The column_name and column_values cannot contain commas.
        """

        value_row_count = \
            len(self._attribute_dic[self._title_index_dic.keys()[0]])
        if len(column_values) != value_row_count:
            msg = 'The number of column values must equal the number of rows.'
            raise DataMissingValuesError, msg

        if self._attribute_dic.has_key(column_name):
            if not overwrite:
                msg = 'Column name %s already in use!' % column_name
                raise TitleValueError, msg
        else:
            # New title.  Add it to the title index.
            self._title_index_dic[column_name] = len(self._title_index_dic)
        self._attribute_dic[column_name] = column_values
        #print "self._title_index_dic[column_name]",self._title_index_dic

    def save(self, file_name=None):
        """
        Save the exposure csv file
        """
        if file_name is None:
            file_name = self._file_name

        fd = open(file_name, 'wb')
        writer = csv.writer(fd)

        #Write the title row to the csv file
        line = [None]*len(self._title_index_dic)
        for title in self._title_index_dic.iterkeys():
            line[self._title_index_dic[title]] = title
        writer.writerow(line)

        # Write the values to the csv file
        value_row_count = \
            len(self._attribute_dic[self._title_index_dic.keys()[0]])
        for row_i in range(value_row_count):
            line = [None]*len(self._title_index_dic)
            for title in self._title_index_dic.iterkeys():
                line[self._title_index_dic[title]] = \
                     self._attribute_dic[title][row_i]
            writer.writerow(line)


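# Editor's sketch of a typical Exposure_csv round trip; the file name
# and column title used here are hypothetical.
def _example_exposure_csv(file_name):
    exposure = Exposure_csv(file_name)          # Expects LATITUDE/LONGITUDE
    locations = exposure.get_location()         # Geospatial_data or None
    values = exposure.get_column('STRUCTURES')  # List of strings
    exposure.set_column('STRUCTURES_COPY', values, overwrite=True)
    exposure.save(file_name + '.out')
    return locations

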
def csv2dict(file_name, title_check_list=None):
    """
    Load in the csv as a dic, title as key and column info as value.
    Also, create a dic, title as key and column index as value,
    to keep track of the column order.

    WARNING: Values are returned as strings.
    Do this to change a list of strings to a list of floats:
        time = [float(x) for x in time]
    """

    attribute_dic = {}
    title_index_dic = {}
    titles_stripped = [] # list of titles
    reader = csv.reader(file(file_name))

    # Read in and manipulate the title info
    titles = reader.next()
    for i, title in enumerate(titles):
        titles_stripped.append(title.strip())
        title_index_dic[title.strip()] = i
    title_count = len(titles_stripped)
    #print "title_index_dic",title_index_dic
    if title_check_list is not None:
        for title_check in title_check_list:
            #msg = "Reading error.  This column title is not present ", title_check
            #assert title_index_dic.has_key(title_check), msg
            if not title_index_dic.has_key(title_check):
                #reader.close()
                msg = "Reading error.  This column title is not present ", \
                      title_check
                raise IOError, msg



    #Create a dic of column values, indexed by column title
    for line in reader:
        if len(line) != title_count:
            raise IOError #FIXME make this nicer
        for i, value in enumerate(line):
            attribute_dic.setdefault(titles_stripped[i], []).append(value)

    return attribute_dic, title_index_dic


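# Editor's sketch: read a csv and convert one column from strings to
# floats, as the docstring suggests ('gauges.csv' and the 'time' column
# are hypothetical).
def _example_csv2dict():
    attribute_dic, title_index_dic = csv2dict('gauges.csv')
    time = [float(x) for x in attribute_dic['time']]
    return time

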
#Auxiliary
def write_obj(filename, x, y, z):
    """Store x,y,z vectors into filename (obj format)
       Vectors are assumed to have dimension (M,3) where
       M corresponds to the number of elements.
       Triangles are assumed to be disconnected.

       The three numbers in each vector correspond to three vertices,

       e.g. the x coordinate of vertex 1 of element i is in x[i,1]

    """
    #print 'Writing obj to %s' % filename

    import os.path

    root, ext = os.path.splitext(filename)
    if ext == '.obj':
        FN = filename
    else:
        FN = filename + '.obj'


    outfile = open(FN, 'wb')
    outfile.write("# Triangulation as an obj file\n")

    M, N = x.shape
    assert N == 3  #Assuming three vertices per element

    for i in range(M):
        for j in range(N):
            outfile.write("v %f %f %f\n" % (x[i,j], y[i,j], z[i,j]))

    for i in range(M):
        base = i*N
        outfile.write("f %d %d %d\n" % (base+1, base+2, base+3))

    outfile.close()


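# Editor's sketch: write a single disconnected triangle as 'tri.obj'
# (file name hypothetical).
def _example_write_obj():
    from Numeric import array, Float
    x = array([[0.0, 1.0, 0.0]], Float)  # One element, three vertices
    y = array([[0.0, 0.0, 1.0]], Float)
    z = array([[0.0, 0.0, 0.0]], Float)
    write_obj('tri', x, y, z)            # Produces tri.obj
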
#########################################################
#Conversion routines
#########################################################

def sww2obj(basefilename, size):
    """Convert netcdf based data output to obj
    """
    from Scientific.IO.NetCDF import NetCDFFile

    from Numeric import Float, zeros

    #Get NetCDF
    FN = create_filename('.', basefilename, 'sww', size)
    print 'Reading from ', FN
    fid = NetCDFFile(FN, 'r')  #Open existing file for read


    # Get the variables
    x = fid.variables['x']
    y = fid.variables['y']
    z = fid.variables['elevation']
    time = fid.variables['time']
    stage = fid.variables['stage']

    M = size  #Number of lines
    xx = zeros((M,3), Float)
    yy = zeros((M,3), Float)
    zz = zeros((M,3), Float)

    for i in range(M):
        for j in range(3):
            xx[i,j] = x[i+j*M]
            yy[i,j] = y[i+j*M]
            zz[i,j] = z[i+j*M]

    #Write obj for bathymetry
    FN = create_filename('.', basefilename, 'obj', size)
    write_obj(FN, xx, yy, zz)


    #Now read all the data with variable information, combine with
    #x,y info and store as obj

    for k in range(len(time)):
        t = time[k]
        print 'Processing timestep %f' % t

        for i in range(M):
            for j in range(3):
                zz[i,j] = stage[k, i+j*M]


        #Write obj for variable data
        #FN = create_filename(basefilename, 'obj', size, time=t)
        FN = create_filename('.', basefilename[:5], 'obj', size, time=t)
        write_obj(FN, xx, yy, zz)


def dat2obj(basefilename):
    """Convert line based data output to obj
    FIXME: Obsolete?
    """

    import glob, os
    from anuga.config import data_dir


    #Get bathymetry and x,y's
    lines = open(data_dir+os.sep+basefilename+'_geometry.dat', 'r').readlines()

    from Numeric import zeros, Float

    M = len(lines)  #Number of lines
    x = zeros((M,3), Float)
    y = zeros((M,3), Float)
    z = zeros((M,3), Float)

    for i, line in enumerate(lines):
        tokens = line.split()
        values = map(float, tokens)

        for j in range(3):
            x[i,j] = values[j*3]
            y[i,j] = values[j*3+1]
            z[i,j] = values[j*3+2]


    #Write obj for bathymetry
    write_obj(data_dir+os.sep+basefilename+'_geometry', x, y, z)


    #Now read all the data files with variable information, combine with
    #x,y info and store as obj

    files = glob.glob(data_dir+os.sep+basefilename+'*.dat')

    for filename in files:
        print 'Processing %s' % filename

        lines = open(data_dir+os.sep+filename, 'r').readlines()
        assert len(lines) == M
        root, ext = os.path.splitext(filename)

        #Get time from filename
        i0 = filename.find('_time=')
        if i0 == -1:
            #Skip bathymetry file
            continue

        i0 += 6  #Position where time starts
        i1 = filename.find('.dat')

        if i1 > i0:
            t = float(filename[i0:i1])
        else:
            raise DataTimeError, 'Hmmmm'



        for i, line in enumerate(lines):
            tokens = line.split()
            values = map(float, tokens)

            for j in range(3):
                z[i,j] = values[j]

        #Write obj for variable data
        write_obj(data_dir+os.sep+basefilename+'_time=%.4f' % t, x, y, z)


def filter_netcdf(filename1, filename2, first=0, last=None, step=1):
    """Read netcdf filename1, pick timesteps first:step:last and save to
    netcdf file filename2
    """
    from Scientific.IO.NetCDF import NetCDFFile

    #Get NetCDF
    infile = NetCDFFile(filename1, 'r')  #Open existing file for read
    outfile = NetCDFFile(filename2, 'w')  #Open new file


    #Copy dimensions
    for d in infile.dimensions:
        outfile.createDimension(d, infile.dimensions[d])

    for name in infile.variables:
        var = infile.variables[name]
        outfile.createVariable(name, var.typecode(), var.dimensions)


    #Copy the static variables
    for name in infile.variables:
        if name == 'time' or name == 'stage':
            pass
        else:
            #Copy
            outfile.variables[name][:] = infile.variables[name][:]

    #Copy selected timesteps
    time = infile.variables['time']
    stage = infile.variables['stage']

    newtime = outfile.variables['time']
    newstage = outfile.variables['stage']

    if last is None:
        last = len(time)

    selection = range(first, last, step)
    for i, j in enumerate(selection):
        print 'Copying timestep %d of %d (%f)' % (j, last-first, time[j])
        newtime[i] = time[j]
        newstage[i,:] = stage[j,:]

    #Close
    infile.close()
    outfile.close()


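# Editor's note, e.g. to keep every second of the first 100 timesteps
# (file names hypothetical):
#
#     filter_netcdf('run.sww', 'run_filtered.sww', first=0, last=100, step=2)
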
#Get data objects
def get_dataobject(domain, mode='w'):
    """Return instance of class of given format using filename,
    e.g. Data_format_sww(domain, mode) when domain.format == 'sww'.
    """

    cls = eval('Data_format_%s' % domain.format)
    return cls(domain, mode)




def dem2pts(basename_in, basename_out=None,
            easting_min=None, easting_max=None,
            northing_min=None, northing_max=None,
            use_cache=False, verbose=False):
    """Read Digital Elevation model from the following NetCDF format (.dem)

    Example:

    ncols         3121
    nrows         1800
    xllcorner     722000
    yllcorner     5893000
    cellsize      25
    NODATA_value  -9999
    138.3698 137.4194 136.5062 135.5558 ..........

    Convert to NetCDF pts format which is

    points:  (Nx2) Float array
    elevation: N Float array
    """



    kwargs = {'basename_out': basename_out,
              'easting_min': easting_min,
              'easting_max': easting_max,
              'northing_min': northing_min,
              'northing_max': northing_max,
              'verbose': verbose}

    if use_cache is True:
        from caching import cache
        result = cache(_dem2pts, basename_in, kwargs,
                       dependencies = [basename_in + '.dem'],
                       verbose = verbose)

    else:
        result = apply(_dem2pts, [basename_in], kwargs)

    return result


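# Editor's sketch (hypothetical basename): convert 'topo.dem' to
# 'topo.pts', clipped to an easting window:
#
#     dem2pts('topo', easting_min=722000, easting_max=725000, verbose=True)
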
def _dem2pts(basename_in, basename_out=None, verbose=False,
             easting_min=None, easting_max=None,
             northing_min=None, northing_max=None):
    """Read Digital Elevation model from the following NetCDF format (.dem)

    Internal function. See public function dem2pts for details.
    """

    #FIXME: Can this be written feasibly using write_pts?

    import os
    from Scientific.IO.NetCDF import NetCDFFile
    from Numeric import Float, zeros, reshape, sum

    root = basename_in

    #Get NetCDF
    infile = NetCDFFile(root + '.dem', 'r')  #Open existing netcdf file for read

    if verbose: print 'Reading DEM from %s' % (root + '.dem')

    ncols = infile.ncols[0]
    nrows = infile.nrows[0]
    xllcorner = infile.xllcorner[0]  #Easting of lower left corner
    yllcorner = infile.yllcorner[0]  #Northing of lower left corner
    cellsize = infile.cellsize[0]
    NODATA_value = infile.NODATA_value[0]
    dem_elevation = infile.variables['elevation']

    zone = infile.zone[0]
    false_easting = infile.false_easting[0]
    false_northing = infile.false_northing[0]

    #Text strings
    projection = infile.projection
    datum = infile.datum
    units = infile.units


    #Get output file
    if basename_out is None:
        ptsname = root + '.pts'
    else:
        ptsname = basename_out + '.pts'

    if verbose: print 'Store to NetCDF file %s' % ptsname
    # NetCDF file definition
    outfile = NetCDFFile(ptsname, 'w')

    #Create new file
    outfile.institution = 'Geoscience Australia'
    outfile.description = 'NetCDF pts format for compact and portable storage ' +\
                          'of spatial point data'
    #Assign default values
    if easting_min is None: easting_min = xllcorner
    if easting_max is None: easting_max = xllcorner + ncols*cellsize
    if northing_min is None: northing_min = yllcorner
    if northing_max is None: northing_max = yllcorner + nrows*cellsize

    #Compute offsets to update georeferencing
    easting_offset = xllcorner - easting_min
    northing_offset = yllcorner - northing_min

    #Georeferencing
    outfile.zone = zone
    outfile.xllcorner = easting_min  #Easting of lower left corner
    outfile.yllcorner = northing_min  #Northing of lower left corner
    outfile.false_easting = false_easting
    outfile.false_northing = false_northing

    outfile.projection = projection
    outfile.datum = datum
    outfile.units = units


    #Grid info (FIXME: probably not going to be used, but heck)
    outfile.ncols = ncols
    outfile.nrows = nrows

    dem_elevation_r = reshape(dem_elevation, (nrows, ncols))
    totalnopoints = nrows*ncols

    # Calculate number of NODATA_values for each row in clipped region
    #FIXME: use array operations to do faster
    nn = 0
    k = 0
    i1_0 = 0
    j1_0 = 0
    thisj = 0
    thisi = 0
    for i in range(nrows):
        y = (nrows-i-1)*cellsize + yllcorner
        for j in range(ncols):
            x = j*cellsize + xllcorner
            if easting_min <= x <= easting_max and \
               northing_min <= y <= northing_max:
                thisj = j
                thisi = i
                if dem_elevation_r[i,j] == NODATA_value: nn += 1

                if k == 0:
                    i1_0 = i
                    j1_0 = j
                k += 1

    index1 = j1_0
    index2 = thisj

    # dimension definitions
    nrows_in_bounding_box = int(round((northing_max-northing_min)/cellsize))
    ncols_in_bounding_box = int(round((easting_max-easting_min)/cellsize))

    clippednopoints = (thisi+1-i1_0)*(thisj+1-j1_0)
    nopoints = clippednopoints - nn

    clipped_dem_elev = dem_elevation_r[i1_0:thisi+1, j1_0:thisj+1]

    if verbose:
        print 'There are %d values in the elevation' % totalnopoints
        print 'There are %d values in the clipped elevation' % clippednopoints
        print 'There are %d NODATA_values in the clipped elevation' % nn

    outfile.createDimension('number_of_points', nopoints)
    outfile.createDimension('number_of_dimensions', 2)  #This is 2d data

    # variable definitions
    outfile.createVariable('points', Float, ('number_of_points',
                                             'number_of_dimensions'))
    outfile.createVariable('elevation', Float, ('number_of_points',))

    # Get handles to the variables
    points = outfile.variables['points']
    elevation = outfile.variables['elevation']

    lenv = index2-index1+1
    #Store data
    global_index = 0
    #for i in range(nrows):
    for i in range(i1_0, thisi+1, 1):
        if verbose and i % ((nrows+10)/10) == 0:
            print 'Processing row %d of %d' % (i, nrows)

        lower_index = global_index

        v = dem_elevation_r[i, index1:index2+1]
        no_NODATA = sum(v == NODATA_value)
        if no_NODATA > 0:
            newcols = lenv - no_NODATA  #ncols_in_bounding_box - no_NODATA
        else:
            newcols = lenv  #ncols_in_bounding_box

        telev = zeros(newcols, Float)
        tpoints = zeros((newcols, 2), Float)

        local_index = 0

        y = (nrows-i-1)*cellsize + yllcorner
        #for j in range(ncols):
        for j in range(j1_0, index2+1, 1):

            x = j*cellsize + xllcorner
            if easting_min <= x <= easting_max and \
               northing_min <= y <= northing_max and \
               dem_elevation_r[i,j] != NODATA_value:
                tpoints[local_index, :] = [x-easting_min, y-northing_min]
                telev[local_index] = dem_elevation_r[i,j]
                global_index += 1
                local_index += 1

        upper_index = global_index

        if upper_index == lower_index + newcols:
            points[lower_index:upper_index, :] = tpoints
            elevation[lower_index:upper_index] = telev

    assert global_index == nopoints, 'index not equal to number of points'

    infile.close()
    outfile.close()



def _read_hecras_cross_sections(lines):
    """Return block of surface lines for each cross section
    Starts with SURFACE LINE,
    Ends with END CROSS-SECTION
    """

    points = []

    reading_surface = False
    for i, line in enumerate(lines):

        if len(line.strip()) == 0:    #Ignore blanks
            continue

        if lines[i].strip().startswith('SURFACE LINE'):
            reading_surface = True
            continue

        if lines[i].strip().startswith('END') and reading_surface:
            yield points
            reading_surface = False
            points = []

        if reading_surface:
            fields = line.strip().split(',')
            easting = float(fields[0])
            northing = float(fields[1])
            elevation = float(fields[2])
            points.append([easting, northing, elevation])




1508def hecras_cross_sections2pts(basename_in,
1509                              basename_out=None,
1510                              verbose=False):
1511    """Read HEC-RAS Elevation datal from the following ASCII format (.sdf)
1512
1513    Example:
1514
1515
1516# RAS export file created on Mon 15Aug2005 11:42
1517# by HEC-RAS Version 3.1.1
1518
1519BEGIN HEADER:
1520  UNITS: METRIC
1521  DTM TYPE: TIN
1522  DTM: v:\1\cit\perth_topo\river_tin
1523  STREAM LAYER: c:\local\hecras\21_02_03\up_canning_cent3d.shp
1524  CROSS-SECTION LAYER: c:\local\hecras\21_02_03\up_can_xs3d.shp
1525  MAP PROJECTION: UTM
1526  PROJECTION ZONE: 50
1527  DATUM: AGD66
1528  VERTICAL DATUM:
1529  NUMBER OF REACHES:  19
1530  NUMBER OF CROSS-SECTIONS:  14206
1531END HEADER:
1532
1533
1534Only the SURFACE LINE data of the following form will be utilised
1535
1536  CROSS-SECTION:
1537    STREAM ID:Southern-Wungong
1538    REACH ID:Southern-Wungong
1539    STATION:19040.*
1540    CUT LINE:
1541      405548.671603161 , 6438142.7594925
1542      405734.536092045 , 6438326.10404912
1543      405745.130459356 , 6438331.48627354
1544      405813.89633823 , 6438368.6272789
1545    SURFACE LINE:
1546     405548.67,   6438142.76,   35.37
1547     405552.24,   6438146.28,   35.41
1548     405554.78,   6438148.78,   35.44
1549     405555.80,   6438149.79,   35.44
1550     405559.37,   6438153.31,   35.45
1551     405560.88,   6438154.81,   35.44
1552     405562.93,   6438156.83,   35.42
1553     405566.50,   6438160.35,   35.38
1554     405566.99,   6438160.83,   35.37
1555     ...
1556   END CROSS-SECTION
1557
1558    Convert to NetCDF pts format which is
1559
1560    points:  (Nx2) Float array
1561    elevation: N Float array
1562    """
1563
1564    import os
1565    from Scientific.IO.NetCDF import NetCDFFile
1566    from Numeric import Float, zeros, reshape
1567
1568    root = basename_in
1569
1570    #Get ASCII file
1571    infile = open(root + '.sdf', 'r')  #Open SDF file for read
1572
1573    if verbose: print 'Reading DEM from %s' %(root + '.sdf')
1574
1575    lines = infile.readlines()
1576    infile.close()
1577
1578    if verbose: print 'Converting to pts format'
1579
1580    i = 0
1581    while lines[i].strip() == '' or lines[i].strip().startswith('#'):
1582        i += 1
1583
1584    assert lines[i].strip().upper() == 'BEGIN HEADER:'
1585    i += 1
1586
1587    assert lines[i].strip().upper().startswith('UNITS:')
1588    units = lines[i].strip().split()[1]
1589    i += 1
1590
1591    assert lines[i].strip().upper().startswith('DTM TYPE:')
1592    i += 1
1593
1594    assert lines[i].strip().upper().startswith('DTM:')
1595    i += 1
1596
1597    assert lines[i].strip().upper().startswith('STREAM')
1598    i += 1
1599
1600    assert lines[i].strip().upper().startswith('CROSS')
1601    i += 1
1602
1603    assert lines[i].strip().upper().startswith('MAP PROJECTION:')
1604    projection = lines[i].strip().split(':')[1]
1605    i += 1
1606
1607    assert lines[i].strip().upper().startswith('PROJECTION ZONE:')
1608    zone = int(lines[i].strip().split(':')[1])
1609    i += 1
1610
1611    assert lines[i].strip().upper().startswith('DATUM:')
1612    datum = lines[i].strip().split(':')[1]
1613    i += 1
1614
1615    assert lines[i].strip().upper().startswith('VERTICAL DATUM:')
1616    i += 1
1617
1618    assert lines[i].strip().upper().startswith('NUMBER OF REACHES:')
1619    i += 1
1620
1621    assert lines[i].strip().upper().startswith('NUMBER OF CROSS-SECTIONS:')
1622    number_of_cross_sections = int(lines[i].strip().split(':')[1])
1623    i += 1
1624
1625
1626    #Now read all points
1627    points = []
1628    elevation = []
1629    for j, entries in enumerate(_read_hecras_cross_sections(lines[i:])):
1630        for k, entry in enumerate(entries):
1631            points.append(entry[:2])
1632            elevation.append(entry[2])
1633
1634
    msg = 'Actual number of cross sections == %d, reported as %d'\
          %(j+1, number_of_cross_sections)
1637    assert j+1 == number_of_cross_sections, msg
1638
1639    #Get output file
    if basename_out is None:
1641        ptsname = root + '.pts'
1642    else:
1643        ptsname = basename_out + '.pts'
1644
1645    geo_ref = Geo_reference(zone, 0, 0, datum, projection, units)
1646    geo = Geospatial_data(points, {"elevation":elevation},
1647                          verbose=verbose, geo_reference=geo_ref)
1648    geo.export_points_file(ptsname)
1649
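# A usage sketch for the converter above (the basename 'river' is
# hypothetical): once the .pts file has been written, it can be read back
# and inspected with Geospatial_data, e.g.
#
#     G = Geospatial_data(file_name='river.pts')
#     print G.get_data_points()[:5]             # first few (x, y) pairs
#     print G.get_attributes('elevation')[:5]   # matching elevations
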
1650def export_grid(basename_in, extra_name_out = None,
1651                quantities = None, # defaults to elevation
1652                timestep = None,
1653                reduction = None,
1654                cellsize = 10,
1655                NODATA_value = -9999,
1656                easting_min = None,
1657                easting_max = None,
1658                northing_min = None,
1659                northing_max = None,
1660                verbose = False,
1661                origin = None,
1662                datum = 'WGS84',
1663                format = 'ers'):
    """Wrapper for sww2dem - see sww2dem for what most of the
    parameters do.

    quantities is a list of quantities. Each quantity will be
    calculated for each sww file.

    Returns the basenames of the files produced, each made up of the
    dir and the file name, without the extension.

    One output file is produced for each input sww file and each
    quantity.
    """
1679   
1680    if quantities is None:
1681        quantities = ['elevation']
1682       
    if type(quantities) is str:
        quantities = [quantities]
1685
1686    # How many sww files are there?
1687    dir, base = os.path.split(basename_in)
1690
1691    iterate_over = get_all_swwfiles(dir,base,verbose)
1692   
1693    if dir == "":
1694        dir = "." # Unix compatibility
1695   
1696    files_out = []
1698    for sww_file in iterate_over:
1699        for quantity in quantities:
1700            if extra_name_out is None:
1701                basename_out = sww_file + '_' + quantity
1702            else:
1703                basename_out = sww_file + '_' + quantity + '_' \
1704                               + extra_name_out
1706       
1707            file_out = sww2dem(dir+sep+sww_file, dir+sep+basename_out,
1708                               quantity, 
1709                               timestep,
1710                               reduction,
1711                               cellsize,
1712                               NODATA_value,
1713                               easting_min,
1714                               easting_max,
1715                               northing_min,
1716                               northing_max,
1717                               verbose,
1718                               origin,
1719                               datum,
1720                               format)
1721            files_out.append(file_out)
1723    return files_out
1724
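# Illustrative call (a sketch; the basename 'myrun' is hypothetical).
# This would write one ASCII grid per sww file and per quantity, e.g.
# myrun_elevation.asc and myrun_stage.asc, and return their basenames:
#
#     files = export_grid('myrun',
#                         quantities = ['elevation', 'stage'],
#                         cellsize = 25,
#                         format = 'asc',
#                         verbose = True)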
1725
1726def get_timeseries(production_dirs, output_dir, scenario_name, gauges_dir_name,
1727                   plot_quantity, generate_fig = False,
1728                   reportname = None, surface = False, time_min = None,
1729                   time_max = None, title_on = False, verbose = True,
1730                   nodes=None):
1731    """
1732    nodes - number of processes used.
1733
1734    warning - this function has no tests
1735    """
    if reportname is None:
1737        report = False
1738    else:
1739        report = True
1740       
1741    if nodes is None:
1742        is_parallel = False
1743    else:
1744        is_parallel = True
1745       
1746    # Generate figures
1747    swwfiles = {}
1748   
1749    if is_parallel is True:   
1750        for i in range(nodes):
1751            print 'Sending node %d of %d' %(i,nodes)
1752            swwfiles = {}
            if reportname is not None:
                node_reportname = reportname + '_%s' %(i)
            else:
                node_reportname = None
1755            for label_id in production_dirs.keys():
1756                if label_id == 'boundaries':
1757                    swwfile = best_boundary_sww
1758                else:
1759                    file_loc = output_dir + label_id + sep
1760                    sww_extra = '_P%s_%s' %(i,nodes)
1761                    swwfile = file_loc + scenario_name + sww_extra + '.sww'
                    print 'swwfile',swwfile
                swwfiles[swwfile] = label_id
1764
            texname, elev_output = sww2timeseries(swwfiles,
                                                  gauges_dir_name,
                                                  production_dirs,
                                                  report = report,
                                                  reportname = node_reportname,
                                                  plot_quantity = plot_quantity,
                                                  generate_fig = generate_fig,
                                                  surface = surface,
                                                  time_min = time_min,
                                                  time_max = time_max,
                                                  title_on = title_on,
                                                  verbose = verbose)
1777    else:   
1778        for label_id in production_dirs.keys():       
1779            if label_id == 'boundaries':
1780                print 'boundaries'
1781                file_loc = project.boundaries_in_dir
1782                swwfile = project.boundaries_dir_name3 + '.sww'
1783                #  swwfile = boundary_dir_filename
1784            else:
1785                file_loc = output_dir + label_id + sep
1786                swwfile = file_loc + scenario_name + '.sww'
1787            swwfiles[swwfile] = label_id
1788       
1789        texname, elev_output = sww2timeseries(swwfiles,
1790                                              gauges_dir_name,
1791                                              production_dirs,
1792                                              report = report,
1793                                              reportname = reportname,
1794                                              plot_quantity = plot_quantity,
1795                                              generate_fig = generate_fig,
1796                                              surface = surface,
1797                                              time_min = time_min,
1798                                              time_max = time_max,
1799                                              title_on = title_on,
1800                                              verbose = verbose)
1801                                         
1802
1803   
1804def sww2dem(basename_in, basename_out = None,
1805            quantity = None, # defaults to elevation
1806            timestep = None,
1807            reduction = None,
1808            cellsize = 10,
1809            NODATA_value = -9999,
1810            easting_min = None,
1811            easting_max = None,
1812            northing_min = None,
1813            northing_max = None,
1814            verbose = False,
1815            origin = None,
1816            datum = 'WGS84',
1817            format = 'ers'):
1818
1819    """Read SWW file and convert to Digitial Elevation model format
1820    (.asc or .ers)
1821
1822    Example (ASC):
1823
1824    ncols         3121
1825    nrows         1800
1826    xllcorner     722000
1827    yllcorner     5893000
1828    cellsize      25
1829    NODATA_value  -9999
1830    138.3698 137.4194 136.5062 135.5558 ..........
1831
    Also write an accompanying file with same basename_out but extension .prj
1833    used to fix the UTM zone, datum, false northings and eastings.
1834
1835    The prj format is assumed to be as
1836
1837    Projection    UTM
1838    Zone          56
1839    Datum         WGS84
1840    Zunits        NO
1841    Units         METERS
1842    Spheroid      WGS84
1843    Xshift        0.0000000000
1844    Yshift        10000000.0000000000
1845    Parameters
1846
1847
1848    The parameter quantity must be the name of an existing quantity or
1849    an expression involving existing quantities. The default is
1850    'elevation'. Quantity is not a list of quantities.
1851
1852    if timestep (an index) is given, output quantity at that timestep
1853
    if reduction is given use that to reduce quantity over all timesteps.

    datum is the geodetic datum written to the output (default 'WGS84').

    format can be either 'asc' or 'ers'
1859    """
1860
1861    import sys
1862    from Numeric import array, Float, concatenate, NewAxis, zeros, reshape, \
1863         sometrue
1864    from Numeric import array2string
1865
1866    from anuga.utilities.polygon import inside_polygon, outside_polygon, \
1867         separate_points_by_polygon
1868    from anuga.abstract_2d_finite_volumes.util import \
1869         apply_expression_to_dictionary
1870
1871    msg = 'Format must be either asc or ers'
1872    assert format.lower() in ['asc', 'ers'], msg
1873
1874
1875    false_easting = 500000
1876    false_northing = 10000000
1877
1878    if quantity is None:
1879        quantity = 'elevation'
1880       
1881    if reduction is None:
1882        reduction = max
1883
1884    if basename_out is None:
1885        basename_out = basename_in + '_%s' %quantity
1886
1887    if quantity_formula.has_key(quantity):
1888        quantity = quantity_formula[quantity]
1889       
1890    swwfile = basename_in + '.sww'
1891    demfile = basename_out + '.' + format
    # Note the use of a .ers extension is optional (write_ermapper_grid will
    # deal with either option)
1894   
1896   
1897    # Read sww file
    if verbose:
        print 'Reading from %s' %swwfile
        print 'Output basename is %s' %basename_out
1901   
1902    from Scientific.IO.NetCDF import NetCDFFile
1903    fid = NetCDFFile(swwfile)
1904
1905    #Get extent and reference
1906    x = fid.variables['x'][:]
1907    y = fid.variables['y'][:]
1908    volumes = fid.variables['volumes'][:]
1909    if timestep is not None:
1910        times = fid.variables['time'][timestep]
1911    else:
1912        times = fid.variables['time'][:]
1913
1914    number_of_timesteps = fid.dimensions['number_of_timesteps']
1915    number_of_points = fid.dimensions['number_of_points']
1916   
1917    if origin is None:
1918
1919        # Get geo_reference
1920        # sww files don't have to have a geo_ref
1921        try:
1922            geo_reference = Geo_reference(NetCDFObject=fid)
1923        except AttributeError, e:
1924            geo_reference = Geo_reference() # Default georef object
1925
1926        xllcorner = geo_reference.get_xllcorner()
1927        yllcorner = geo_reference.get_yllcorner()
1928        zone = geo_reference.get_zone()
1929    else:
1930        zone = origin[0]
1931        xllcorner = origin[1]
1932        yllcorner = origin[2]
1933
1934
1935
1936    # FIXME: Refactor using code from Interpolation_function.statistics
1937    # (in interpolate.py)
1938    # Something like print swwstats(swwname)
1939    if verbose:
1940        print '------------------------------------------------'
1941        print 'Statistics of SWW file:'
1942        print '  Name: %s' %swwfile
1943        print '  Reference:'
1944        print '    Lower left corner: [%f, %f]'\
1945              %(xllcorner, yllcorner)
1946        if timestep is not None:
1947            print '    Time: %f' %(times)
1948        else:
1949            print '    Start time: %f' %fid.starttime[0]
1950        print '  Extent:'
1951        print '    x [m] in [%f, %f], len(x) == %d'\
1952              %(min(x.flat), max(x.flat), len(x.flat))
1953        print '    y [m] in [%f, %f], len(y) == %d'\
1954              %(min(y.flat), max(y.flat), len(y.flat))
1955        if timestep is not None:
1956            print '    t [s] = %f, len(t) == %d' %(times, 1)
1957        else:
1958            print '    t [s] in [%f, %f], len(t) == %d'\
1959              %(min(times), max(times), len(times))
1960        print '  Quantities [SI units]:'
1961        for name in ['stage', 'xmomentum', 'ymomentum']:
1962            q = fid.variables[name][:].flat
1963            if timestep is not None:
1964                q = q[timestep*len(x):(timestep+1)*len(x)]
            print '    %s in [%f, %f]' %(name, min(q), max(q))
1966        for name in ['elevation']:
1967            q = fid.variables[name][:].flat
            print '    %s in [%f, %f]' %(name, min(q), max(q))
1969           
1970    # Get quantity and reduce if applicable
1971    if verbose: print 'Processing quantity %s' %quantity
1972
1973    # Turn NetCDF objects into Numeric arrays
1974    quantity_dict = {}
1975    for name in fid.variables.keys():
1976        quantity_dict[name] = fid.variables[name][:] 
1977
1978
1979    # Convert quantity expression to quantities found in sww file   
1980    q = apply_expression_to_dictionary(quantity, quantity_dict)
1981
1982    if len(q.shape) == 2:
1983        #q has a time component and needs to be reduced along
1984        #the temporal dimension
1985        if verbose: print 'Reducing quantity %s' %quantity
1986        q_reduced = zeros( number_of_points, Float )
1987       
1988        if timestep is not None:
1989            for k in range(number_of_points):
1990                q_reduced[k] = q[timestep,k] 
1991        else:
1992            for k in range(number_of_points):
1993                q_reduced[k] = reduction( q[:,k] )
1994
1995        q = q_reduced
1996
1997    #Post condition: Now q has dimension: number_of_points
1998    assert len(q.shape) == 1
1999    assert q.shape[0] == number_of_points
2000
2001
2002    if verbose:
2003        print 'Processed values for %s are in [%f, %f]' %(quantity, min(q), max(q))
2004
2005
2006    #Create grid and update xll/yll corner and x,y
2007
2008    #Relative extent
2009    if easting_min is None:
2010        xmin = min(x)
2011    else:
2012        xmin = easting_min - xllcorner
2013
2014    if easting_max is None:
2015        xmax = max(x)
2016    else:
2017        xmax = easting_max - xllcorner
2018
2019    if northing_min is None:
2020        ymin = min(y)
2021    else:
2022        ymin = northing_min - yllcorner
2023
2024    if northing_max is None:
2025        ymax = max(y)
2026    else:
2027        ymax = northing_max - yllcorner
2028
2029
2030
2031    if verbose: print 'Creating grid'
2032    ncols = int((xmax-xmin)/cellsize)+1
2033    nrows = int((ymax-ymin)/cellsize)+1
2034
2035
2036    #New absolute reference and coordinates
2037    newxllcorner = xmin+xllcorner
2038    newyllcorner = ymin+yllcorner
2039
2040    x = x+xllcorner-newxllcorner
2041    y = y+yllcorner-newyllcorner
2042   
2043    vertex_points = concatenate ((x[:, NewAxis] ,y[:, NewAxis]), axis = 1)
2044    assert len(vertex_points.shape) == 2
2045
2046    grid_points = zeros ( (ncols*nrows, 2), Float )
2047
2048
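    # Build the grid in row-major order: grid point k = i*ncols + j holds
    # (xg, yg) relative to the new lower left corner. For 'ers' the y values
    # run top-down to match that format's row ordering.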
    for i in xrange(nrows):
        if format.lower() == 'asc':
            yg = i*cellsize
        else:
            # this will flip the order of the y values for ers
            yg = (nrows-i)*cellsize
2055
2056        for j in xrange(ncols):
2057            xg = j*cellsize
2058            k = i*ncols + j
2059
2060            grid_points[k,0] = xg
2061            grid_points[k,1] = yg
2062
2063    #Interpolate
2064    from anuga.fit_interpolate.interpolate import Interpolate
2065
2066    # Remove loners from vertex_points, volumes here
2067    vertex_points, volumes = remove_lone_verts(vertex_points, volumes)
2070    interp = Interpolate(vertex_points, volumes, verbose = verbose)
2071
2072    #Interpolate using quantity values
2073    if verbose: print 'Interpolating'
2074    grid_values = interp.interpolate(q, grid_points).flat
2075
2076
2077    if verbose:
2078        print 'Interpolated values are in [%f, %f]' %(min(grid_values),
2079                                                      max(grid_values))
2080
2081    #Assign NODATA_value to all points outside bounding polygon (from interpolation mesh)
2082    P = interp.mesh.get_boundary_polygon()
2083    outside_indices = outside_polygon(grid_points, P, closed=True)
2084
2085    for i in outside_indices:
2086        grid_values[i] = NODATA_value
2087
2088
2089
2090
2091    if format.lower() == 'ers':
2092        # setup ERS header information
2093        grid_values = reshape(grid_values,(nrows, ncols))
2094        header = {}
2095        header['datum'] = '"' + datum + '"'
2096        # FIXME The use of hardwired UTM and zone number needs to be made optional
2097        # FIXME Also need an automatic test for coordinate type (i.e. EN or LL)
2098        header['projection'] = '"UTM-' + str(zone) + '"'
2099        header['coordinatetype'] = 'EN'
2100        if header['coordinatetype'] == 'LL':
2101            header['longitude'] = str(newxllcorner)
2102            header['latitude'] = str(newyllcorner)
2103        elif header['coordinatetype'] == 'EN':
2104            header['eastings'] = str(newxllcorner)
2105            header['northings'] = str(newyllcorner)
2106        header['nullcellvalue'] = str(NODATA_value)
2107        header['xdimension'] = str(cellsize)
2108        header['ydimension'] = str(cellsize)
2109        header['value'] = '"' + quantity + '"'
2110        #header['celltype'] = 'IEEE8ByteReal'  #FIXME: Breaks unit test
2111
2112
2113        #Write
2114        if verbose: print 'Writing %s' %demfile
2115        import ermapper_grids
2116        ermapper_grids.write_ermapper_grid(demfile, grid_values, header)
2117
        fid.close()
        return basename_out
2119    else:
2120        #Write to Ascii format
2121
2122        #Write prj file
2123        prjfile = basename_out + '.prj'
2124
2125        if verbose: print 'Writing %s' %prjfile
2126        prjid = open(prjfile, 'w')
2127        prjid.write('Projection    %s\n' %'UTM')
2128        prjid.write('Zone          %d\n' %zone)
2129        prjid.write('Datum         %s\n' %datum)
2130        prjid.write('Zunits        NO\n')
2131        prjid.write('Units         METERS\n')
2132        prjid.write('Spheroid      %s\n' %datum)
2133        prjid.write('Xshift        %d\n' %false_easting)
2134        prjid.write('Yshift        %d\n' %false_northing)
2135        prjid.write('Parameters\n')
2136        prjid.close()
2137
2138
2139
2140        if verbose: print 'Writing %s' %demfile
2141
2142        ascid = open(demfile, 'w')
2143
2144        ascid.write('ncols         %d\n' %ncols)
2145        ascid.write('nrows         %d\n' %nrows)
2146        ascid.write('xllcorner     %d\n' %newxllcorner)
2147        ascid.write('yllcorner     %d\n' %newyllcorner)
2148        ascid.write('cellsize      %f\n' %cellsize)
2149        ascid.write('NODATA_value  %d\n' %NODATA_value)
2150
2151
2155
2156        for i in range(nrows):
2157            if verbose and i%((nrows+10)/10)==0:
2158                print 'Doing row %d of %d' %(i, nrows)
2159
2160            base_index = (nrows-i-1)*ncols
2161
2162            slice = grid_values[base_index:base_index+ncols]
2163            s = array2string(slice, max_line_width=sys.maxint)
2164            ascid.write(s[1:-1] + '\n')
2165
2166
2173
2174        #Close
2175        ascid.close()
2176        fid.close()
2177        return basename_out
2178
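# Illustrative call (a sketch; filenames are hypothetical): write the
# maximum stage over all timesteps of myrun.sww to an ArcView ASCII grid
# myrun_stage_max.asc, plus its .prj companion:
#
#     sww2dem('myrun',
#             basename_out = 'myrun_stage_max',
#             quantity = 'stage',
#             reduction = max,
#             cellsize = 25,
#             format = 'asc',
#             verbose = True)
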
2179#Backwards compatibility
2180def sww2asc(basename_in, basename_out = None,
2181            quantity = None,
2182            timestep = None,
2183            reduction = None,
2184            cellsize = 10,
2185            verbose = False,
2186            origin = None):
2187    print 'sww2asc will soon be obsoleted - please use sww2dem'
2188    sww2dem(basename_in,
2189            basename_out = basename_out,
2190            quantity = quantity,
2191            timestep = timestep,
2192            reduction = reduction,
2193            cellsize = cellsize,
2194            verbose = verbose,
2195            origin = origin,
            datum = 'WGS84',
            format = 'asc')
2198
2199def sww2ers(basename_in, basename_out = None,
2200            quantity = None,
2201            timestep = None,
2202            reduction = None,
2203            cellsize = 10,
2204            verbose = False,
2205            origin = None,
2206            datum = 'WGS84'):
2207    print 'sww2ers will soon be obsoleted - please use sww2dem'
2208    sww2dem(basename_in,
2209            basename_out = basename_out,
2210            quantity = quantity,
2211            timestep = timestep,
2212            reduction = reduction,
2213            cellsize = cellsize,
2214            verbose = verbose,
2215            origin = origin,
2216            datum = datum,
2217            format = 'ers')
2218################################# END COMPATIBILITY ##############
2219
2220
2221
2222def sww2pts(basename_in, basename_out=None,
2223            data_points=None,
2224            quantity=None,
2225            timestep=None,
2226            reduction=None,
2227            NODATA_value=-9999,
2228            verbose=False,
2229            origin=None):
2231
2232
2233    """Read SWW file and convert to interpolated values at selected points
2234
    The parameter 'quantity' must be the name of an existing quantity or
2236    an expression involving existing quantities. The default is
2237    'elevation'.
2238
2239    if timestep (an index) is given, output quantity at that timestep
2240
2241    if reduction is given use that to reduce quantity over all timesteps.
2242
    data_points (Nx2 array) gives the locations of points where quantity is to be computed.
2244   
2245    """
2246
2247    import sys
2248    from Numeric import array, Float, concatenate, NewAxis, zeros, reshape, sometrue
2249    from Numeric import array2string
2250
2251    from anuga.utilities.polygon import inside_polygon, outside_polygon, separate_points_by_polygon
2252    from anuga.abstract_2d_finite_volumes.util import apply_expression_to_dictionary
2253
2254    from anuga.geospatial_data.geospatial_data import Geospatial_data
2255
2256    if quantity is None:
2257        quantity = 'elevation'
2258
2259    if reduction is None:
2260        reduction = max
2261
2262    if basename_out is None:
2263        basename_out = basename_in + '_%s' %quantity
2264
2265    swwfile = basename_in + '.sww'
2266    ptsfile = basename_out + '.pts'
2267
2268    # Read sww file
2269    if verbose: print 'Reading from %s' %swwfile
2270    from Scientific.IO.NetCDF import NetCDFFile
2271    fid = NetCDFFile(swwfile)
2272
2273    # Get extent and reference
2274    x = fid.variables['x'][:]
2275    y = fid.variables['y'][:]
2276    volumes = fid.variables['volumes'][:]
2277
2278    number_of_timesteps = fid.dimensions['number_of_timesteps']
2279    number_of_points = fid.dimensions['number_of_points']
2280    if origin is None:
2281
2282        # Get geo_reference
2283        # sww files don't have to have a geo_ref
2284        try:
2285            geo_reference = Geo_reference(NetCDFObject=fid)
2286        except AttributeError, e:
2287            geo_reference = Geo_reference() #Default georef object
2288
2289        xllcorner = geo_reference.get_xllcorner()
2290        yllcorner = geo_reference.get_yllcorner()
2291        zone = geo_reference.get_zone()
2292    else:
2293        zone = origin[0]
2294        xllcorner = origin[1]
2295        yllcorner = origin[2]
2296
2297
2298
2299    # FIXME: Refactor using code from file_function.statistics
2300    # Something like print swwstats(swwname)
2301    if verbose:
2302        x = fid.variables['x'][:]
2303        y = fid.variables['y'][:]
2304        times = fid.variables['time'][:]
2305        print '------------------------------------------------'
2306        print 'Statistics of SWW file:'
2307        print '  Name: %s' %swwfile
2308        print '  Reference:'
2309        print '    Lower left corner: [%f, %f]'\
2310              %(xllcorner, yllcorner)
2311        print '    Start time: %f' %fid.starttime[0]
2312        print '  Extent:'
2313        print '    x [m] in [%f, %f], len(x) == %d'\
2314              %(min(x.flat), max(x.flat), len(x.flat))
2315        print '    y [m] in [%f, %f], len(y) == %d'\
2316              %(min(y.flat), max(y.flat), len(y.flat))
2317        print '    t [s] in [%f, %f], len(t) == %d'\
2318              %(min(times), max(times), len(times))
2319        print '  Quantities [SI units]:'
2320        for name in ['stage', 'xmomentum', 'ymomentum', 'elevation']:
2321            q = fid.variables[name][:].flat
2322            print '    %s in [%f, %f]' %(name, min(q), max(q))
2323
2324
2325
2326    # Get quantity and reduce if applicable
2327    if verbose: print 'Processing quantity %s' %quantity
2328
2329    # Turn NetCDF objects into Numeric arrays
2330    quantity_dict = {}
2331    for name in fid.variables.keys():
2332        quantity_dict[name] = fid.variables[name][:]
2333
2334
2335
2336    # Convert quantity expression to quantities found in sww file   
2337    q = apply_expression_to_dictionary(quantity, quantity_dict)
2338
2339
2340
2341    if len(q.shape) == 2:
2342        # q has a time component and needs to be reduced along
2343        # the temporal dimension
2344        if verbose: print 'Reducing quantity %s' %quantity
2345        q_reduced = zeros( number_of_points, Float )
2346
2347        for k in range(number_of_points):
2348            q_reduced[k] = reduction( q[:,k] )
2349
2350        q = q_reduced
2351
2352    # Post condition: Now q has dimension: number_of_points
2353    assert len(q.shape) == 1
2354    assert q.shape[0] == number_of_points
2355
2356
2357    if verbose:
2358        print 'Processed values for %s are in [%f, %f]' %(quantity, min(q), max(q))
2359
2360
2361    # Create grid and update xll/yll corner and x,y
2362    vertex_points = concatenate ((x[:, NewAxis] ,y[:, NewAxis]), axis = 1)
2363    assert len(vertex_points.shape) == 2
2364
2365    # Interpolate
2366    from anuga.fit_interpolate.interpolate import Interpolate
2367    interp = Interpolate(vertex_points, volumes, verbose = verbose)
2368
2369    # Interpolate using quantity values
2370    if verbose: print 'Interpolating'
2371    interpolated_values = interp.interpolate(q, data_points).flat
2372
2373
2374    if verbose:
2375        print 'Interpolated values are in [%f, %f]' %(min(interpolated_values),
2376                                                      max(interpolated_values))
2377
2378    # Assign NODATA_value to all points outside bounding polygon (from interpolation mesh)
2379    P = interp.mesh.get_boundary_polygon()
2380    outside_indices = outside_polygon(data_points, P, closed=True)
2381
2382    for i in outside_indices:
2383        interpolated_values[i] = NODATA_value
2384
2385
2386    # Store results   
2387    G = Geospatial_data(data_points=data_points,
2388                        attributes=interpolated_values)
2389
2390    G.export_points_file(ptsfile, absolute = True)
2391
2392    fid.close()
2393
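# Illustrative call (a sketch; filenames and coordinates are hypothetical):
# sample the reduced stage at two absolute UTM points and write gauges.pts:
#
#     from Numeric import array
#     sww2pts('myrun',
#             basename_out = 'gauges',
#             data_points = array([[382000.0, 6380000.0],
#                                  [382500.0, 6380250.0]]),
#             quantity = 'stage',
#             reduction = max,
#             verbose = True)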
2394
2395def convert_dem_from_ascii2netcdf(basename_in, basename_out = None,
2396                                  use_cache = False,
2397                                  verbose = False):
2398    """Read Digitial Elevation model from the following ASCII format (.asc)
2399
2400    Example:
2401
2402    ncols         3121
2403    nrows         1800
2404    xllcorner     722000
2405    yllcorner     5893000
2406    cellsize      25
2407    NODATA_value  -9999
2408    138.3698 137.4194 136.5062 135.5558 ..........
2409
2410    Convert basename_in + '.asc' to NetCDF format (.dem)
2411    mimicking the ASCII format closely.
2412
2413
2414    An accompanying file with same basename_in but extension .prj must exist
2415    and is used to fix the UTM zone, datum, false northings and eastings.
2416
2417    The prj format is assumed to be as
2418
2419    Projection    UTM
2420    Zone          56
2421    Datum         WGS84
2422    Zunits        NO
2423    Units         METERS
2424    Spheroid      WGS84
2425    Xshift        0.0000000000
2426    Yshift        10000000.0000000000
2427    Parameters
2428    """
2429
2430
2431
2432    kwargs = {'basename_out': basename_out, 'verbose': verbose}
2433
2434    if use_cache is True:
2435        from caching import cache
2436        result = cache(_convert_dem_from_ascii2netcdf, basename_in, kwargs,
2437                       dependencies = [basename_in + '.asc'],
2438                       verbose = verbose)
2439
2440    else:
        result = _convert_dem_from_ascii2netcdf(basename_in, **kwargs)
2442
2443    return result
2444
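# Illustrative call (a sketch; the basename 'topo' is hypothetical and
# assumes topo.asc and topo.prj exist). This writes topo.dem; with
# use_cache=True a repeat call with unchanged input is served from cache:
#
#     convert_dem_from_ascii2netcdf('topo', use_cache = True, verbose = True)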
2445
2446
2447
2448
2449
2450def _convert_dem_from_ascii2netcdf(basename_in, basename_out = None,
2451                                  verbose = False):
2452    """Read Digitial Elevation model from the following ASCII format (.asc)
2453
2454    Internal function. See public function convert_dem_from_ascii2netcdf for details.
2455    """
2456
2457    import os
2458    from Scientific.IO.NetCDF import NetCDFFile
2459    from Numeric import Float, array
2460
2461    #root, ext = os.path.splitext(basename_in)
2462    root = basename_in
2463
2464    ###########################################
2465    # Read Meta data
    if verbose: print 'Reading METADATA from %s' %(root + '.prj')
2467    metadatafile = open(root + '.prj')
2468    metalines = metadatafile.readlines()
2469    metadatafile.close()
2470
2471    L = metalines[0].strip().split()
2472    assert L[0].strip().lower() == 'projection'
2473    projection = L[1].strip()                   #TEXT
2474
2475    L = metalines[1].strip().split()
2476    assert L[0].strip().lower() == 'zone'
2477    zone = int(L[1].strip())
2478
2479    L = metalines[2].strip().split()
2480    assert L[0].strip().lower() == 'datum'
2481    datum = L[1].strip()                        #TEXT
2482
2483    L = metalines[3].strip().split()
2484    assert L[0].strip().lower() == 'zunits'     #IGNORE
2485    zunits = L[1].strip()                       #TEXT
2486
2487    L = metalines[4].strip().split()
2488    assert L[0].strip().lower() == 'units'
2489    units = L[1].strip()                        #TEXT
2490
2491    L = metalines[5].strip().split()
2492    assert L[0].strip().lower() == 'spheroid'   #IGNORE
2493    spheroid = L[1].strip()                     #TEXT
2494
2495    L = metalines[6].strip().split()
2496    assert L[0].strip().lower() == 'xshift'
2497    false_easting = float(L[1].strip())
2498
2499    L = metalines[7].strip().split()
2500    assert L[0].strip().lower() == 'yshift'
2501    false_northing = float(L[1].strip())
2502
2504
2505
2506    ###########################################
2507    #Read DEM data
2508
2509    datafile = open(basename_in + '.asc')
2510
2511    if verbose: print 'Reading DEM from %s' %(basename_in + '.asc')
2512    lines = datafile.readlines()
2513    datafile.close()
2514
2515    if verbose: print 'Got', len(lines), ' lines'
2516
2517    ncols = int(lines[0].split()[1].strip())
2518    nrows = int(lines[1].split()[1].strip())
2519    xllcorner = float(lines[2].split()[1].strip())
2520    yllcorner = float(lines[3].split()[1].strip())
2521    cellsize = float(lines[4].split()[1].strip())
2522    NODATA_value = int(lines[5].split()[1].strip())
2523
2524    assert len(lines) == nrows + 6
2525
2526
2527    ##########################################
2528
2529
    if basename_out is None:
2531        netcdfname = root + '.dem'
2532    else:
2533        netcdfname = basename_out + '.dem'
2534
2535    if verbose: print 'Store to NetCDF file %s' %netcdfname
2536    # NetCDF file definition
2537    fid = NetCDFFile(netcdfname, 'w')
2538
2539    #Create new file
2540    fid.institution = 'Geoscience Australia'
2541    fid.description = 'NetCDF DEM format for compact and portable storage ' +\
2542                      'of spatial point data'
2543
2544    fid.ncols = ncols
2545    fid.nrows = nrows
2546    fid.xllcorner = xllcorner
2547    fid.yllcorner = yllcorner
2548    fid.cellsize = cellsize
2549    fid.NODATA_value = NODATA_value
2550
2551    fid.zone = zone
2552    fid.false_easting = false_easting
2553    fid.false_northing = false_northing
2554    fid.projection = projection
2555    fid.datum = datum
2556    fid.units = units
2557
2558
2559    # dimension definitions
2560    fid.createDimension('number_of_rows', nrows)
2561    fid.createDimension('number_of_columns', ncols)
2562
2563    # variable definitions
2564    fid.createVariable('elevation', Float, ('number_of_rows',
2565                                            'number_of_columns'))
2566
2567    # Get handles to the variables
2568    elevation = fid.variables['elevation']
2569
2570    #Store data
2571    n = len(lines[6:])
2572    for i, line in enumerate(lines[6:]):
2573        fields = line.split()
2574        if verbose and i%((n+10)/10)==0:
2575            print 'Processing row %d of %d' %(i, nrows)
2576
2577        elevation[i, :] = array([float(x) for x in fields])
2578
2579    fid.close()
2580
2581
2582
2583
2584
2585def ferret2sww(basename_in, basename_out = None,
2586               verbose = False,
2587               minlat = None, maxlat = None,
2588               minlon = None, maxlon = None,
2589               mint = None, maxt = None, mean_stage = 0,
2590               origin = None, zscale = 1,
2591               fail_on_NaN = True,
2592               NaN_filler = 0,
2593               elevation = None,
2594               inverted_bathymetry = True
2595               ): #FIXME: Bathymetry should be obtained
2596                                  #from MOST somehow.
2597                                  #Alternatively from elsewhere
2598                                  #or, as a last resort,
2599                                  #specified here.
2600                                  #The value of -100 will work
2601                                  #for the Wollongong tsunami
2602                                  #scenario but is very hacky
2603    """Convert MOST and 'Ferret' NetCDF format for wave propagation to
2604    sww format native to abstract_2d_finite_volumes.
2605
2606    Specify only basename_in and read files of the form
2607    basefilename_ha.nc, basefilename_ua.nc, basefilename_va.nc containing
2608    relative height, x-velocity and y-velocity, respectively.
2609
2610    Also convert latitude and longitude to UTM. All coordinates are
2611    assumed to be given in the GDA94 datum.
2612
    min's and max's: If omitted - full extent is used.
    To include a value min may equal it, while max must exceed it.
    Lat and lon are assumed to be in decimal degrees.
2616
2617    origin is a 3-tuple with geo referenced
2618    UTM coordinates (zone, easting, northing)
2619
2620    nc format has values organised as HA[TIME, LATITUDE, LONGITUDE]
2621    which means that longitude is the fastest
2622    varying dimension (row major order, so to speak)
2623
2624    ferret2sww uses grid points as vertices in a triangular grid
2625    counting vertices from lower left corner upwards, then right
2626    """
2627
2628    import os
2629    from Scientific.IO.NetCDF import NetCDFFile
2630    from Numeric import Float, Int, Int32, searchsorted, zeros, array
2631    from Numeric import allclose, around
2632
2633    precision = Float
2634
2635    msg = 'Must use latitudes and longitudes for minlat, maxlon etc'
2636
    if minlat is not None:
        assert -90 < minlat < 90, msg
    if maxlat is not None:
        assert -90 < maxlat < 90, msg
        if minlat is not None:
            assert maxlat > minlat
    if minlon is not None:
        assert -180 < minlon < 180, msg
    if maxlon is not None:
        assert -180 < maxlon < 180, msg
        if minlon is not None:
            assert maxlon > minlon
2649       
2650
2651
2652    #Get NetCDF data
2653    if verbose: print 'Reading files %s_*.nc' %basename_in
2655    file_h = NetCDFFile(basename_in + '_ha.nc', 'r') #Wave amplitude (cm)
2656    file_u = NetCDFFile(basename_in + '_ua.nc', 'r') #Velocity (x) (cm/s)
2657    file_v = NetCDFFile(basename_in + '_va.nc', 'r') #Velocity (y) (cm/s)
2658    file_e = NetCDFFile(basename_in + '_e.nc', 'r')  #Elevation (z) (m)
2659
2660    if basename_out is None:
2661        swwname = basename_in + '.sww'
2662    else:
2663        swwname = basename_out + '.sww'
2664
2665    # Get dimensions of file_h
2666    for dimension in file_h.dimensions.keys():
2667        if dimension[:3] == 'LON':
2668            dim_h_longitude = dimension
2669        if dimension[:3] == 'LAT':
2670            dim_h_latitude = dimension
2671        if dimension[:4] == 'TIME':
2672            dim_h_time = dimension
2673
2677
2678    times = file_h.variables[dim_h_time]
2679    latitudes = file_h.variables[dim_h_latitude]
2680    longitudes = file_h.variables[dim_h_longitude]
2681   
2682    # get dimensions for file_e
2683    for dimension in file_e.dimensions.keys():
2684        if dimension[:3] == 'LON':
2685            dim_e_longitude = dimension
2686        if dimension[:3] == 'LAT':
2687            dim_e_latitude = dimension
2688
2689    # get dimensions for file_u
2690    for dimension in file_u.dimensions.keys():
2691        if dimension[:3] == 'LON':
2692            dim_u_longitude = dimension
2693        if dimension[:3] == 'LAT':
2694            dim_u_latitude = dimension
2695        if dimension[:4] == 'TIME':
2696            dim_u_time = dimension
2697           
2698    # get dimensions for file_v
2699    for dimension in file_v.dimensions.keys():
2700        if dimension[:3] == 'LON':
2701            dim_v_longitude = dimension
2702        if dimension[:3] == 'LAT':
2703            dim_v_latitude = dimension
2704        if dimension[:4] == 'TIME':
2705            dim_v_time = dimension
2706
2707
    # Precision used by MOST for lat/lon is 4 or 5 decimals
2709    e_lat = around(file_e.variables[dim_e_latitude][:], 5)
2710    e_lon = around(file_e.variables[dim_e_longitude][:], 5)
2711
2712    # Check that files are compatible
2713    assert allclose(latitudes, file_u.variables[dim_u_latitude])
2714    assert allclose(latitudes, file_v.variables[dim_v_latitude])
2715    assert allclose(latitudes, e_lat)
2716
2717    assert allclose(longitudes, file_u.variables[dim_u_longitude])
2718    assert allclose(longitudes, file_v.variables[dim_v_longitude])
2719    assert allclose(longitudes, e_lon)
2720
2721    if mint is None:
2722        jmin = 0
2723        mint = times[0]       
2724    else:
2725        jmin = searchsorted(times, mint)
2726       
2727    if maxt is None:
2728        jmax = len(times)
2729        maxt = times[-1]
2730    else:
2731        jmax = searchsorted(times, maxt)
2732
2735    kmin, kmax, lmin, lmax = _get_min_max_indexes(latitudes[:],
2736                                                  longitudes[:],
2737                                                  minlat, maxlat,
2738                                                  minlon, maxlon)
2739
2740
2741    times = times[jmin:jmax]
2742    latitudes = latitudes[kmin:kmax]
2743    longitudes = longitudes[lmin:lmax]
2744
2747
2748    if verbose: print 'cropping'
2749    zname = 'ELEVATION'
2750
2751    amplitudes = file_h.variables['HA'][jmin:jmax, kmin:kmax, lmin:lmax]
2752    uspeed = file_u.variables['UA'][jmin:jmax, kmin:kmax, lmin:lmax] #Lon
2753    vspeed = file_v.variables['VA'][jmin:jmax, kmin:kmax, lmin:lmax] #Lat
2754    elevations = file_e.variables[zname][kmin:kmax, lmin:lmax]
2755
2769
2770
2771
2772    #Get missing values
2773    nan_ha = file_h.variables['HA'].missing_value[0]
2774    nan_ua = file_u.variables['UA'].missing_value[0]
2775    nan_va = file_v.variables['VA'].missing_value[0]
2776    if hasattr(file_e.variables[zname],'missing_value'):
2777        nan_e  = file_e.variables[zname].missing_value[0]
2778    else:
2779        nan_e = None
2780
2781    #Cleanup
2782    from Numeric import sometrue
2783
2784    missing = (amplitudes == nan_ha)
2785    if sometrue (missing):
2786        if fail_on_NaN:
2787            msg = 'NetCDFFile %s contains missing values'\
2788                  %(basename_in+'_ha.nc')
2789            raise DataMissingValuesError, msg
2790        else:
2791            amplitudes = amplitudes*(missing==0) + missing*NaN_filler
2792
2793    missing = (uspeed == nan_ua)
2794    if sometrue (missing):
2795        if fail_on_NaN:
2796            msg = 'NetCDFFile %s contains missing values'\
2797                  %(basename_in+'_ua.nc')
2798            raise DataMissingValuesError, msg
2799        else:
2800            uspeed = uspeed*(missing==0) + missing*NaN_filler
2801
2802    missing = (vspeed == nan_va)
2803    if sometrue (missing):
2804        if fail_on_NaN:
2805            msg = 'NetCDFFile %s contains missing values'\
2806                  %(basename_in+'_va.nc')
2807            raise DataMissingValuesError, msg
2808        else:
2809            vspeed = vspeed*(missing==0) + missing*NaN_filler
2810
2811
2812    missing = (elevations == nan_e)
2813    if sometrue (missing):
2814        if fail_on_NaN:
2815            msg = 'NetCDFFile %s contains missing values'\
2816                  %(basename_in+'_e.nc')
2817            raise DataMissingValuesError, msg
2818        else:
2819            elevations = elevations*(missing==0) + missing*NaN_filler
2820
2821    #######
2822
2823
2824
2825    number_of_times = times.shape[0]
2826    number_of_latitudes = latitudes.shape[0]
2827    number_of_longitudes = longitudes.shape[0]
2828
2829    assert amplitudes.shape[0] == number_of_times
2830    assert amplitudes.shape[1] == number_of_latitudes
2831    assert amplitudes.shape[2] == number_of_longitudes
2832
2833    if verbose:
2834        print '------------------------------------------------'
2835        print 'Statistics:'
2836        print '  Extent (lat/lon):'
2837        print '    lat in [%f, %f], len(lat) == %d'\
2838              %(min(latitudes.flat), max(latitudes.flat),
2839                len(latitudes.flat))
2840        print '    lon in [%f, %f], len(lon) == %d'\
2841              %(min(longitudes.flat), max(longitudes.flat),
2842                len(longitudes.flat))
2843        print '    t in [%f, %f], len(t) == %d'\
2844              %(min(times.flat), max(times.flat), len(times.flat))
2845
        q = amplitudes.flat
        name = 'Amplitudes (ha) [cm]'
        print '    %s in [%f, %f]' %(name, min(q), max(q))

        q = uspeed.flat
        name = 'Speeds (ua) [cm/s]'
        print '    %s in [%f, %f]' %(name, min(q), max(q))

        q = vspeed.flat
        name = 'Speeds (va) [cm/s]'
        print '    %s in [%f, %f]' %(name, min(q), max(q))

        q = elevations.flat
        name = 'Elevations (e) [m]'
        print '    %s in [%f, %f]' %(name, min(q), max(q))
2861
2862
2864    number_of_points = number_of_latitudes*number_of_longitudes
2865    number_of_volumes = (number_of_latitudes-1)*(number_of_longitudes-1)*2
2866
2867
2868    file_h.close()
2869    file_u.close()
2870    file_v.close()
2871    file_e.close()
2872
2873
2874    # NetCDF file definition
2875    outfile = NetCDFFile(swwname, 'w')
2876
2877    description = 'Converted from Ferret files: %s, %s, %s, %s'\
2878                  %(basename_in + '_ha.nc',
2879                    basename_in + '_ua.nc',
2880                    basename_in + '_va.nc',
2881                    basename_in + '_e.nc')
2882   
2883    #Create new file
2884    starttime = times[0]
2885    sww = Write_sww()
2886    sww.header(outfile, times, number_of_volumes,
2887                         number_of_points, description=description,
2888                         verbose=verbose)
2889
2890    #Store
2891    from anuga.coordinate_transforms.redfearn import redfearn
2892    x = zeros(number_of_points, Float)  #Easting
2893    y = zeros(number_of_points, Float)  #Northing
2894
2895
2896    if verbose: print 'Making triangular grid'
2897    #Check zone boundaries
2898    refzone, _, _ = redfearn(latitudes[0],longitudes[0])
2899
2900    vertices = {}
2901    i = 0
2902    for k, lat in enumerate(latitudes):       #Y direction
2903        for l, lon in enumerate(longitudes):  #X direction
2904
2905            vertices[l,k] = i
2906
2907            zone, easting, northing = redfearn(lat,lon)
2908
            msg = 'Zone boundary crossed at longitude = %s' %lon
            #assert zone == refzone, msg
2912            x[i] = easting
2913            y[i] = northing
2914            i += 1
2915
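    # Each 'rectangular' element with corners
    #     v1 = (l, k+1)    v3 = (l+1, k+1)
    #     v2 = (l, k)      v4 = (l+1, k)
    # is split along the diagonal v2-v3 into an upper triangle (v1, v2, v3)
    # and a lower triangle (v4, v3, v2).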
2916    #Construct 2 triangles per 'rectangular' element
2917    volumes = []
2918    for l in range(number_of_longitudes-1):    #X direction
2919        for k in range(number_of_latitudes-1): #Y direction
2920            v1 = vertices[l,k+1]
2921            v2 = vertices[l,k]
2922            v3 = vertices[l+1,k+1]
2923            v4 = vertices[l+1,k]
2924
2925            volumes.append([v1,v2,v3]) #Upper element
2926            volumes.append([v4,v3,v2]) #Lower element
2927
2928    volumes = array(volumes)
2929
2930    if origin is None:
2931        origin = Geo_reference(refzone,min(x),min(y))
2932    geo_ref = write_NetCDF_georeference(origin, outfile)
2933   
2934    if elevation is not None:
2935        z = elevation
2936    else:
2937        if inverted_bathymetry:
2938            z = -1*elevations
2939        else:
2940            z = elevations
2941    #FIXME: z should be obtained from MOST and passed in here
2942
2943    from Numeric import resize
2944    z = resize(z,outfile.variables['z'][:].shape)
2945    outfile.variables['x'][:] = x - geo_ref.get_xllcorner()
2946    outfile.variables['y'][:] = y - geo_ref.get_yllcorner()
    outfile.variables['z'][:] = z             #FIXME HACK for backwards compat.
2948    outfile.variables['elevation'][:] = z
2949    outfile.variables['volumes'][:] = volumes.astype(Int32) #For Opteron 64
2950
2951
2952
2953    #Time stepping
2954    stage = outfile.variables['stage']
2955    xmomentum = outfile.variables['xmomentum']
2956    ymomentum = outfile.variables['ymomentum']
2957
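    # Unit conversion below: MOST stores amplitude in cm and speeds in cm/s,
    # so both are divided by 100. Stage is w = zscale*ha/100 + mean_stage
    # and momentum is speed/100 * h, with depth h = w - z.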
2958    if verbose: print 'Converting quantities'
2959    n = len(times)
2960    for j in range(n):
2961        if verbose and j%((n+10)/10)==0: print '  Doing %d of %d' %(j, n)
2962        i = 0
2963        for k in range(number_of_latitudes):      #Y direction
2964            for l in range(number_of_longitudes): #X direction
2965                w = zscale*amplitudes[j,k,l]/100 + mean_stage
2966                stage[j,i] = w
2967                h = w - z[i]
2968                xmomentum[j,i] = uspeed[j,k,l]/100*h
2969                ymomentum[j,i] = vspeed[j,k,l]/100*h
2970                i += 1
2971
2973
2974    #FIXME: Refactor using code from file_function.statistics
2975    #Something like print swwstats(swwname)
2976    if verbose:
2977        x = outfile.variables['x'][:]
2978        y = outfile.variables['y'][:]
2979        print '------------------------------------------------'
2980        print 'Statistics of output file:'
2981        print '  Name: %s' %swwname
2982        print '  Reference:'
2983        print '    Lower left corner: [%f, %f]'\
2984              %(geo_ref.get_xllcorner(), geo_ref.get_yllcorner())
        print '    Start time: %f' %starttime
2986        print '    Min time: %f' %mint
2987        print '    Max time: %f' %maxt
2988        print '  Extent:'
2989        print '    x [m] in [%f, %f], len(x) == %d'\
2990              %(min(x.flat), max(x.flat), len(x.flat))
2991        print '    y [m] in [%f, %f], len(y) == %d'\
2992              %(min(y.flat), max(y.flat), len(y.flat))
2993        print '    t [s] in [%f, %f], len(t) == %d'\
2994              %(min(times), max(times), len(times))
2995        print '  Quantities [SI units]:'
2996        for name in ['stage', 'xmomentum', 'ymomentum', 'elevation']:
2997            q = outfile.variables[name][:].flat
2998            print '    %s in [%f, %f]' %(name, min(q), max(q))
2999
3000
3001
3002    outfile.close()
3003
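# Illustrative call (a sketch; the basename 'most_out' is hypothetical and
# assumes most_out_ha.nc, most_out_ua.nc, most_out_va.nc and most_out_e.nc
# exist): crop to a lat/lon window and write most_out.sww:
#
#     ferret2sww('most_out',
#                minlat = -21.0, maxlat = -20.0,
#                minlon = 115.0, maxlon = 116.5,
#                mean_stage = 0.0, zscale = 1,
#                verbose = True)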
3004
3005
3006
3007
3008def timefile2netcdf(filename, quantity_names=None, time_as_seconds=False):
3009    """Template for converting typical text files with time series to
3010    NetCDF tms file.
3011
3012
    The file format is assumed to be two fields separated by a comma,
    with time given either as a date-time string:

        time [DD/MM/YY hh:mm:ss], value0 value1 value2 ...

    E.g.

      31/08/04 00:00:00, 1.328223 0 0
      31/08/04 00:15:00, 1.292912 0 0

    or as seconds (with time_as_seconds=True):

      0.0, 1.328223 0 0
      0.1, 1.292912 0 0
3026
3027    will provide a time dependent function f(t) with three attributes
3028
    filename is assumed to be the rootname with extensions .txt and .tms
3030    """
3031
3032    import time, calendar
3033    from Numeric import array
3034    from anuga.config import time_format
3035    from anuga.utilities.numerical_tools import ensure_numeric
3036
3037
3038
3039    fid = open(filename + '.txt')
3040    line = fid.readline()
3041    fid.close()
3042
3043    fields = line.split(',')
    msg = 'File %s must have the format date, value0 value1 value2 ...'\
          %(filename + '.txt')
3045    assert len(fields) == 2, msg
3046
3047    if not time_as_seconds:
3048        try:
3049            starttime = calendar.timegm(time.strptime(fields[0], time_format))
3050        except ValueError:
3051            msg = 'First field in file %s must be' %filename
3052            msg += ' date-time with format %s.\n' %time_format
3053            msg += 'I got %s instead.' %fields[0]
3054            raise DataTimeError, msg
3055    else:
3056        try:
3057            starttime = float(fields[0])
        except ValueError:
            msg = 'Bad time format: could not convert %s to seconds' %fields[0]
            raise DataTimeError, msg
3061
3062
3063    #Split values
3064    values = []
3065    for value in fields[1].split():
3066        values.append(float(value))
3067
3068    q = ensure_numeric(values)
3069
3070    msg = 'ERROR: File must contain at least one independent value'
3071    assert len(q.shape) == 1, msg
3072
3073
3074
3075    #Read times proper
3076    from Numeric import zeros, Float, alltrue
3077    from anuga.config import time_format
3078    import time, calendar
3079
3080    fid = open(filename + '.txt')
3081    lines = fid.readlines()
3082    fid.close()
3083
3084    N = len(lines)
3085    d = len(q)
3086
3087    T = zeros(N, Float)       #Time
3088    Q = zeros((N, d), Float)  #Values
3089
3090    for i, line in enumerate(lines):
3091        fields = line.split(',')
3092        if not time_as_seconds:
3093            realtime = calendar.timegm(time.strptime(fields[0], time_format))
3094        else:
            realtime = float(fields[0])
3096        T[i] = realtime - starttime
3097
3098        for j, value in enumerate(fields[1].split()):
3099            Q[i, j] = float(value)
3100
    msg = 'File %s must list time as a monotonically ' %filename
3102    msg += 'increasing sequence'
3103    assert alltrue( T[1:] - T[:-1] > 0 ), msg
3104
3105    #Create NetCDF file
3106    from Scientific.IO.NetCDF import NetCDFFile
3107
3108    fid = NetCDFFile(filename + '.tms', 'w')
3109
3110
3111    fid.institution = 'Geoscience Australia'
3112    fid.description = 'Time series'
3113
3114
3115    #Reference point
3116    #Start time in seconds since the epoch (midnight 1/1/1970)
3117    #FIXME: Use Georef
3118    fid.starttime = starttime
3119
3120    # dimension definitions
3121    #fid.createDimension('number_of_volumes', self.number_of_volumes)
3122    #fid.createDimension('number_of_vertices', 3)
3123
3124
3125    fid.createDimension('number_of_timesteps', len(T))
3126
3127    fid.createVariable('time', Float, ('number_of_timesteps',))
3128
3129    fid.variables['time'][:] = T
3130
3131    for i in range(Q.shape[1]):
        try:
            name = quantity_names[i]
        except (TypeError, IndexError):
            name = 'Attribute%d' %i
3136
3137        fid.createVariable(name, Float, ('number_of_timesteps',))
3138        fid.variables[name][:] = Q[:,i]
3139
3140    fid.close()
3141
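# Illustrative call (a sketch; the basename 'gauge' is hypothetical and
# assumes gauge.txt holds lines like '0.0, 1.328223 0 0'): write gauge.tms
# with the three columns stored as named attributes:
#
#     timefile2netcdf('gauge',
#                     quantity_names = ['stage', 'xmomentum', 'ymomentum'],
#                     time_as_seconds = True)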
3142
3143def extent_sww(file_name):
3144    """
3145    Read in an sww file.
3146
    Input:
3148    file_name - the sww file
3149
    Output:
3151    z - Vector of bed elevation
3152    volumes - Array.  Each row has 3 values, representing
3153    the vertices that define the volume
3154    time - Vector of the times where there is stage information
3155    stage - array with respect to time and vertices (x,y)
3156    """
3157
3158
3159    from Scientific.IO.NetCDF import NetCDFFile
3160
3161    #Check contents
3162    #Get NetCDF
3163    fid = NetCDFFile(file_name, 'r')
3164
3165    # Get the variables
3166    x = fid.variables['x'][:]
3167    y = fid.variables['y'][:]
3168    stage = fid.variables['stage'][:]
3173
3174    fid.close()
3175
3176    return [min(x),max(x),min(y),max(y),min(stage.flat),max(stage.flat)]
3177
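# Illustrative call (a sketch; the filename is hypothetical): unpack the
# spatial extent and stage range of an existing sww file:
#
#     xmin, xmax, ymin, ymax, stage_min, stage_max = extent_sww('myrun.sww')
#     print 'x in [%f, %f], stage in [%f, %f]'\
#           %(xmin, xmax, stage_min, stage_max)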
3178
def sww2domain(filename, boundary=None, t=None,
               fail_if_NaN=True, NaN_filler=0,
               verbose=False, very_verbose=False):
3182    """
3183    Usage: domain = sww2domain('file.sww',t=time (default = last time in file))
3184
3185    Boundary is not recommended if domain.smooth is not selected, as it
3186    uses unique coordinates, but not unique boundaries. This means that
    the boundary file will not be compatible with the coordinates, and will
3188    give a different final boundary, or crash.
3189    """
    NaN = 9.969209968386869e+036
    # Initialise NaN to the default NetCDF fill value; it is overridden below
    # by a quantity's missing_value attribute if one is present.
3192
3193    from Scientific.IO.NetCDF import NetCDFFile
3194    from shallow_water import Domain
3195    from Numeric import asarray, transpose, resize
3196
3197    if verbose: print 'Reading from ', filename
3198    fid = NetCDFFile(filename, 'r')    #Open existing file for read
3199    time = fid.variables['time']       #Timesteps
3200    if t is None:
3201        t = time[-1]
3202    time_interp = get_time_interp(time,t)
3203
3204    # Get the variables as Numeric arrays
3205    x = fid.variables['x'][:]             #x-coordinates of vertices
3206    y = fid.variables['y'][:]             #y-coordinates of vertices
3207    elevation = fid.variables['elevation']     #Elevation
3208    stage = fid.variables['stage']     #Water level
3209    xmomentum = fid.variables['xmomentum']   #Momentum in the x-direction
3210    ymomentum = fid.variables['ymomentum']   #Momentum in the y-direction
3211
3212    starttime = fid.starttime[0]
3213    volumes = fid.variables['volumes'][:] #Connectivity
3214    coordinates=transpose(asarray([x.tolist(),y.tolist()]))
3215
3216    conserved_quantities = []
3217    interpolated_quantities = {}
3218    other_quantities = []
3219
3220    # get geo_reference
3221    #sww files don't have to have a geo_ref
3222    try:
3223        geo_reference = Geo_reference(NetCDFObject=fid)
3224    except AttributeError:
3225        geo_reference = None
3226
3227    if verbose: print '    getting quantities'
3228    for quantity in fid.variables.keys():
3229        dimensions = fid.variables[quantity].dimensions
3230        if 'number_of_timesteps' in dimensions:
3231            conserved_quantities.append(quantity)
3232            interpolated_quantities[quantity]=\
3233                  interpolated_quantity(fid.variables[quantity][:],time_interp)
3234        else: other_quantities.append(quantity)
3235
3236    other_quantities.remove('x')
3237    other_quantities.remove('y')
3238    other_quantities.remove('z')
3239    other_quantities.remove('volumes')
3240    # Remove the range variables individually, so one missing
3241    # variable does not prevent removal of the others
3242    for range_quantity in ['stage_range', 'xmomentum_range',
3243                           'ymomentum_range', 'elevation_range']:
3244        if range_quantity in other_quantities:
3245            other_quantities.remove(range_quantity)
3247       
3248
3249    conserved_quantities.remove('time')
3250
3251    if verbose: print '    building domain'
3252    #    From domain.Domain:
3253    #    domain = Domain(coordinates, volumes,\
3254    #                    conserved_quantities = conserved_quantities,\
3255    #                    other_quantities = other_quantities,zone=zone,\
3256    #                    xllcorner=xllcorner, yllcorner=yllcorner)
3257
3258    #   From shallow_water.Domain:
3259    coordinates=coordinates.tolist()
3260    volumes=volumes.tolist()
3261    #FIXME:should this be in mesh?(peter row)
3262    if fid.smoothing == 'Yes': unique = False
3263    else: unique = True
3264    if unique:
3265        coordinates,volumes,boundary=weed(coordinates,volumes,boundary)
3266
3267
3268    try:
3269        domain = Domain(coordinates, volumes, boundary)
3270    except AssertionError, e:
3271        fid.close()
3272        msg = 'Domain could not be created: %s. Perhaps use "fail_if_NaN=False and NaN_filler = ..."' %e
3273        raise DataDomainError, msg
3274
3275    if boundary is not None:
3276        domain.boundary = boundary
3277
3278    domain.geo_reference = geo_reference
3279
3280    domain.starttime=float(starttime)+float(t)
3281    domain.time=0.0
3282
3283    for quantity in other_quantities:
3284        try:
3285            NaN = fid.variables[quantity].missing_value
3286        except:
3287            pass #quantity has no missing_value number
3288        X = fid.variables[quantity][:]
3289        if very_verbose:
3290            print '       ',quantity
3291            print '        NaN =',NaN
3292            print '        max(X)'
3293            print '       ',max(X)
3294            print '        max(X)==NaN'
3295            print '       ',max(X)==NaN
3296            print ''
3297        if (max(X)==NaN) or (min(X)==NaN):
3298            if fail_if_NaN:
3299                msg = 'quantity "%s" contains no_data entry'%quantity
3300                raise DataMissingValuesError, msg
3301            else:
3302                data = (X != NaN)
3303                X = (X*data)+(data==0)*NaN_filler
3304        if unique:
3305            X = resize(X,(len(X)/3,3))
3306        domain.set_quantity(quantity,X)
3307    #
3308    for quantity in conserved_quantities:
3309        try:
3310            NaN = fid.variables[quantity].missing_value
3311        except:
3312            pass #quantity has no missing_value number
3313        X = interpolated_quantities[quantity]
3314        if very_verbose:
3315            print '       ',quantity
3316            print '        NaN =',NaN
3317            print '        max(X)'
3318            print '       ',max(X)
3319            print '        max(X)==NaN'
3320            print '       ',max(X)==NaN
3321            print ''
3322        if (max(X)==NaN) or (min(X)==NaN):
3323            if fail_if_NaN:
3324                msg = 'quantity "%s" contains no_data entry'%quantity
3325                raise DataMissingValuesError, msg
3326            else:
3328                data = (X != NaN)
3329                X = (X*data)+(data==0)*NaN_filler
3329        if unique:
3330            X = resize(X,(X.shape[0]/3,3))
3331        domain.set_quantity(quantity,X)
3332
3333    fid.close()
3334    return domain
3335
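# Example usage (an illustrative sketch; 'runup.sww' and the evolve
# parameters are hypothetical):
#
#     # Restart from the last stored timestep and continue the simulation
#     domain = sww2domain('runup.sww', fail_if_NaN=False, NaN_filler=0,
#                         verbose=True)
#     domain.set_name('runup_continued')
#     for t in domain.evolve(yieldstep=1, finaltime=10):
#         domain.write_time()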
3336def interpolated_quantity(saved_quantity,time_interp):
3337
3338    #given an index and ratio, interpolate quantity with respect to time.
3339    index,ratio = time_interp
3340    Q = saved_quantity
3341    if ratio > 0:
3342        q = (1-ratio)*Q[index]+ ratio*Q[index+1]
3343    else:
3344        q = Q[index]
3345    #Return vector of interpolated values
3346    return q
3347
3348def get_time_interp(time,t=None):
3349    #Finds the ratio and index for time interpolation.
3350    #It is borrowed from previous abstract_2d_finite_volumes code.
3351    if t is None:
3352        t=time[-1]
3353        index = -1
3354        ratio = 0.
3355    else:
3356        T = time
3357        tau = t
3358        index=0
3359        msg = 'Time interval derived from file %s [%s:%s]'\
3360            %('FIXMEfilename', T[0], T[-1])
3361        msg += ' does not match model time: %s' %tau
3362        if tau < time[0]: raise DataTimeError, msg
3363        if tau > time[-1]: raise DataTimeError, msg
3364        while tau > time[index]: index += 1
3365        while tau < time[index]: index -= 1
3366        if tau == time[index]:
3367            #Protect against case where tau == time[-1] (last time)
3368            # - also works in general when tau == time[i]
3369            ratio = 0
3370        else:
3371            #t is now between index and index+1
3372            ratio = (tau - time[index])/(time[index+1] - time[index])
3373    return (index,ratio)
3374
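# Example (illustrative): for stored times [0, 10, 20] a model time of 15.0
# lies half way between indexes 1 and 2, so quantities are averaged:
#
#     index, ratio = get_time_interp([0.0, 10.0, 20.0], t=15.0)
#     # index == 1, ratio == 0.5
#     Q = array([[0.0, 0.0], [1.0, 2.0], [3.0, 6.0]])  # f(t, x)
#     q = interpolated_quantity(Q, (index, ratio))
#     # q == [2.0, 4.0], half way between Q[1] and Q[2]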
3375
3376def weed(coordinates,volumes,boundary = None):
3377    if type(coordinates)==ArrayType:
3378        coordinates = coordinates.tolist()
3379    if type(volumes)==ArrayType:
3380        volumes = volumes.tolist()
3381
3382    unique = False
3383    point_dict = {}
3384    same_point = {}
3385    for i in range(len(coordinates)):
3386        point = tuple(coordinates[i])
3387        if point_dict.has_key(point):
3388            unique = True
3389            same_point[i]=point
3390            #to change all point i references to point j
3391        else:
3392            point_dict[point]=i
3393            same_point[i]=point
3394
3395    coordinates = []
3396    i = 0
3397    for point in point_dict.keys():
3398        point = tuple(point)
3399        coordinates.append(list(point))
3400        point_dict[point]=i
3401        i+=1
3402
3403
3404    for volume in volumes:
3405        for i in range(len(volume)):
3406            index = volume[i]
3407            if index>-1:
3408                volume[i]=point_dict[same_point[index]]
3409
3410    new_boundary = {}
3411    if not boundary is None:
3412        for segment in boundary.keys():
3413            point0 = point_dict[same_point[segment[0]]]
3414            point1 = point_dict[same_point[segment[1]]]
3415            label = boundary[segment]
3416            #FIXME should the boundary attributes be concatenated
3417            #('exterior, pond') or replaced ('pond')? (peter row)
3418
3419            if new_boundary.has_key((point0,point1)):
3420                new_boundary[(point0,point1)]=new_boundary[(point0,point1)]#\
3421                                              #+','+label
3422
3423            elif new_boundary.has_key((point1,point0)):
3424                new_boundary[(point1,point0)]=new_boundary[(point1,point0)]#\
3425                                              #+','+label
3426            else: new_boundary[(point0,point1)]=label
3427
3428        boundary = new_boundary
3429
3430    return coordinates,volumes,boundary
3431
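# Example (illustrative): two triangles sharing an edge, with the shared
# vertices duplicated as they would be in a non-smoothed sww file:
#
#     points = [[0.0,0.0], [1.0,0.0], [0.0,1.0],
#               [1.0,0.0], [0.0,1.0], [1.0,1.0]]
#     triangles = [[0,1,2], [3,5,4]]
#     points, triangles, boundary = weed(points, triangles)
#     # len(points) == 4 and both triangles now index the unique points;
#     # boundary is returned unchanged (None here)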
3432
3433def decimate_dem(basename_in, stencil, cellsize_new, basename_out=None,
3434                 verbose=False):
3435    """Read Digitial Elevation model from the following NetCDF format (.dem)
3436
3437    Example:
3438
3439    ncols         3121
3440    nrows         1800
3441    xllcorner     722000
3442    yllcorner     5893000
3443    cellsize      25
3444    NODATA_value  -9999
3445    138.3698 137.4194 136.5062 135.5558 ..........
3446
3447    Decimate data to cellsize_new using stencil and write to NetCDF dem format.
3448    """
3449
3450    import os
3451    from Scientific.IO.NetCDF import NetCDFFile
3452    from Numeric import Float, zeros, sum, reshape, equal
3453
3454    root = basename_in
3455    inname = root + '.dem'
3456
3457    #Open existing netcdf file to read
3458    infile = NetCDFFile(inname, 'r')
3459    if verbose: print 'Reading DEM from %s' %inname
3460
3461    #Read metadata
3462    ncols = infile.ncols[0]
3463    nrows = infile.nrows[0]
3464    xllcorner = infile.xllcorner[0]
3465    yllcorner = infile.yllcorner[0]
3466    cellsize = infile.cellsize[0]
3467    NODATA_value = infile.NODATA_value[0]
3468    zone = infile.zone[0]
3469    false_easting = infile.false_easting[0]
3470    false_northing = infile.false_northing[0]
3471    projection = infile.projection
3472    datum = infile.datum
3473    units = infile.units
3474
3475    dem_elevation = infile.variables['elevation']
3476
3477    #Get output file name
3478    if basename_out is None:
3479        outname = root + '_' + repr(cellsize_new) + '.dem'
3480    else:
3481        outname = basename_out + '.dem'
3482
3483    if verbose: print 'Write decimated NetCDF file to %s' %outname
3484
3485    #Determine some dimensions for decimated grid
3486    (nrows_stencil, ncols_stencil) = stencil.shape
3487    x_offset = ncols_stencil / 2
3488    y_offset = nrows_stencil / 2
3489    cellsize_ratio = int(cellsize_new / cellsize)
3490    ncols_new = 1 + (ncols - ncols_stencil) / cellsize_ratio
3491    nrows_new = 1 + (nrows - nrows_stencil) / cellsize_ratio
3492
3493    #Open netcdf file for output
3494    outfile = NetCDFFile(outname, 'w')
3495
3496    #Create new file
3497    outfile.institution = 'Geoscience Australia'
3498    outfile.description = 'NetCDF DEM format for compact and portable storage ' +\
3499                           'of spatial point data'
3500    #Georeferencing
3501    outfile.zone = zone
3502    outfile.projection = projection
3503    outfile.datum = datum
3504    outfile.units = units
3505
3506    outfile.cellsize = cellsize_new
3507    outfile.NODATA_value = NODATA_value
3508    outfile.false_easting = false_easting
3509    outfile.false_northing = false_northing
3510
3511    outfile.xllcorner = xllcorner + (x_offset * cellsize)
3512    outfile.yllcorner = yllcorner + (y_offset * cellsize)
3513    outfile.ncols = ncols_new
3514    outfile.nrows = nrows_new
3515
3516    # dimension definition
3517    outfile.createDimension('number_of_points', nrows_new*ncols_new)
3518
3519    # variable definition
3520    outfile.createVariable('elevation', Float, ('number_of_points',))
3521
3522    # Get handle to the variable
3523    elevation = outfile.variables['elevation']
3524
3525    dem_elevation_r = reshape(dem_elevation, (nrows, ncols))
3526
3527    #Store data
3528    global_index = 0
3529    for i in range(nrows_new):
3530        if verbose: print 'Processing row %d of %d' %(i, nrows_new)
3531        lower_index = global_index
3532        telev =  zeros(ncols_new, Float)
3533        local_index = 0
3534        trow = i * cellsize_ratio
3535
3536        for j in range(ncols_new):
3537            tcol = j * cellsize_ratio
3538            tmp = dem_elevation_r[trow:trow+nrows_stencil, tcol:tcol+ncols_stencil]
3539
3540            #if dem contains 1 or more NODATA_values set value in
3541            #decimated dem to NODATA_value, else compute decimated
3542            #value using stencil
3543            if sum(sum(equal(tmp, NODATA_value))) > 0:
3544                telev[local_index] = NODATA_value
3545            else:
3546                telev[local_index] = sum(sum(tmp * stencil))
3547
3548            global_index += 1
3549            local_index += 1
3550
3551        upper_index = global_index
3552
3553        elevation[lower_index:upper_index] = telev
3554
3555    assert global_index == nrows_new*ncols_new, 'index not equal to number of points'
3556
3557    infile.close()
3558    outfile.close()
3559
3560
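# Example usage (an illustrative sketch; 'topo.dem' is a hypothetical
# NetCDF DEM with 25m cells).  A normalised 4x4 averaging stencil
# decimates it to 100m cells:
#
#     from Numeric import ones, Float
#     stencil = ones((4,4), Float)/16.
#     decimate_dem('topo', stencil, 100, basename_out='topo_100m',
#                  verbose=True)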
3561
3562
3563def tsh2sww(filename, verbose=False): #test_tsh2sww
3564    """
3565    Convert a tsh/msh file to an sww file; a check that the mesh 'looks' good.
3566    """
3567
3568
3569    if verbose: print 'Creating domain from', filename
3570    domain = pmesh_to_domain_instance(filename, Domain)
3571    if verbose: print "Number of triangles = ", len(domain)
3572
3573    domain.smooth = True
3574    domain.format = 'sww'   #Native netcdf visualisation format
3575    file_path, filename = path.split(filename)
3576    filename, ext = path.splitext(filename)
3577    domain.set_name(filename)
3578    domain.reduction = mean
3579    if verbose: print "file_path", file_path
3580    if file_path == "": file_path = "."
3581    domain.set_datadir(file_path)
3582
3583    if verbose:
3584        print "Output written to " + domain.get_datadir() + sep + \
3585              domain.get_name() + "." + domain.format
3586    sww = get_dataobject(domain)
3587    sww.store_connectivity()
3588    sww.store_timestep('stage')
3589
3590
3591def asc_csiro2sww(bath_dir,
3592                  elevation_dir,
3593                  ucur_dir,
3594                  vcur_dir,
3595                  sww_file,
3596                  minlat = None, maxlat = None,
3597                  minlon = None, maxlon = None,
3598                  zscale=1,
3599                  mean_stage = 0,
3600                  fail_on_NaN = True,
3601                  elevation_NaN_filler = 0,
3602                  bath_prefix='ba',
3603                  elevation_prefix='el',
3604                  verbose=False):
3605    """
3606    Produce an sww boundary file, from esri ascii data from CSIRO.
3607
3608    Also convert latitude and longitude to UTM. All coordinates are
3609    assumed to be given in the GDA94 datum.
3610
3611    assume:
3612    All files are in esri ascii format
3613
3614    4 types of information
3615    bathymetry
3616    elevation
3617    u velocity
3618    v velocity
3619
3620    Assumptions
3621    The metadata of all the files is the same
3622    Each type is in a separate directory
3623    One bath file with extension .000
3624    The time period is less than 24hrs and uniform.
3625    """
3626    from Scientific.IO.NetCDF import NetCDFFile
3627
3628    from anuga.coordinate_transforms.redfearn import redfearn
3629
3630    precision = Float # So if we want to change the precision its done here
3631
3632    # go into the bath dir and load the only file
3633    bath_files = os.listdir(bath_dir)
3634
3635    bath_file = bath_files[0]
3636    bath_dir_file =  bath_dir + os.sep + bath_file
3637    bath_metadata,bath_grid =  _read_asc(bath_dir_file)
3638
3639    #Use the date.time of the bath file as a basis for
3640    #the start time for other files
3641    base_start = bath_file[-12:]
3642
3643    #go into the elevation dir and load the 000 file
3644    elevation_dir_file = elevation_dir  + os.sep + elevation_prefix \
3645                         + base_start
3646
3647    elevation_files = os.listdir(elevation_dir)
3648    ucur_files = os.listdir(ucur_dir)
3649    vcur_files = os.listdir(vcur_dir)
3650    elevation_files.sort()
3651    # the first elevation file should be the
3652    # file with the same base name as the bath data
3653    assert elevation_files[0] == elevation_prefix + base_start
3654
3655    number_of_latitudes = bath_grid.shape[0]
3656    number_of_longitudes = bath_grid.shape[1]
3657    number_of_volumes = (number_of_latitudes-1)*(number_of_longitudes-1)*2
3658
3659    longitudes = [bath_metadata['xllcorner']+x*bath_metadata['cellsize'] \
3660                  for x in range(number_of_longitudes)]
3661    latitudes = [bath_metadata['yllcorner']+y*bath_metadata['cellsize'] \
3662                 for y in range(number_of_latitudes)]
3663
3664    # reverse order of lat, so the first lat represents the first grid row
3665    latitudes.reverse()
3666
3667    kmin, kmax, lmin, lmax = _get_min_max_indexes(latitudes[:],longitudes[:],
3668                                                 minlat=minlat, maxlat=maxlat,
3669                                                 minlon=minlon, maxlon=maxlon)
3670
3671
3672    bath_grid = bath_grid[kmin:kmax,lmin:lmax]
3673    latitudes = latitudes[kmin:kmax]
3674    longitudes = longitudes[lmin:lmax]
3675    number_of_latitudes = len(latitudes)
3676    number_of_longitudes = len(longitudes)
3677    number_of_times = len(os.listdir(elevation_dir))
3678    number_of_points = number_of_latitudes*number_of_longitudes
3679    number_of_volumes = (number_of_latitudes-1)*(number_of_longitudes-1)*2
3680
3681    #Work out the times
3682    if len(elevation_files) > 1:
3683        # Assume: The time period is less than 24hrs.
3684        time_period = (int(elevation_files[1][-3:]) - \
3685                      int(elevation_files[0][-3:]))*60*60
3686        times = [x*time_period for x in range(len(elevation_files))]
3687    else:
3688        times = [0.0]
3689
3690
3691    if verbose:
3692        print '------------------------------------------------'
3693        print 'Statistics:'
3694        print '  Extent (lat/lon):'
3695        print '    lat in [%f, %f], len(lat) == %d'\
3696              %(min(latitudes), max(latitudes),
3697                len(latitudes))
3698        print '    lon in [%f, %f], len(lon) == %d'\
3699              %(min(longitudes), max(longitudes),
3700                len(longitudes))
3701        print '    t in [%f, %f], len(t) == %d'\
3702              %(min(times), max(times), len(times))
3703
3704    ######### WRITE THE SWW FILE #############
3705    # NetCDF file definition
3706    outfile = NetCDFFile(sww_file, 'w')
3707
3708    #Create new file
3709    outfile.institution = 'Geoscience Australia'
3710    outfile.description = 'Converted from XXX'
3711
3712
3713    #For sww compatibility
3714    outfile.smoothing = 'Yes'
3715    outfile.order = 1
3716
3717    #Start time in seconds since the epoch (midnight 1/1/1970)
3718    outfile.starttime = starttime = times[0]
3719
3720
3721    # dimension definitions
3722    outfile.createDimension('number_of_volumes', number_of_volumes)
3723
3724    outfile.createDimension('number_of_vertices', 3)
3725    outfile.createDimension('number_of_points', number_of_points)
3726    outfile.createDimension('number_of_timesteps', number_of_times)
3727
3728    # variable definitions
3729    outfile.createVariable('x', precision, ('number_of_points',))
3730    outfile.createVariable('y', precision, ('number_of_points',))
3731    outfile.createVariable('elevation', precision, ('number_of_points',))
3732
3733    #FIXME: Backwards compatibility
3734    outfile.createVariable('z', precision, ('number_of_points',))
3735    #################################
3736
3737    outfile.createVariable('volumes', Int, ('number_of_volumes',
3738                                            'number_of_vertices'))
3739
3740    outfile.createVariable('time', precision,
3741                           ('number_of_timesteps',))
3742
3743    outfile.createVariable('stage', precision,
3744                           ('number_of_timesteps',
3745                            'number_of_points'))
3746
3747    outfile.createVariable('xmomentum', precision,
3748                           ('number_of_timesteps',
3749                            'number_of_points'))
3750
3751    outfile.createVariable('ymomentum', precision,
3752                           ('number_of_timesteps',
3753                            'number_of_points'))
3754
3755    #Store
3756    from anuga.coordinate_transforms.redfearn import redfearn
3757    x = zeros(number_of_points, Float)  #Easting
3758    y = zeros(number_of_points, Float)  #Northing
3759
3760    if verbose: print 'Making triangular grid'
3761    #Get zone of 1st point.
3762    refzone, _, _ = redfearn(latitudes[0],longitudes[0])
3763
3764    vertices = {}
3765    i = 0
3766    for k, lat in enumerate(latitudes):
3767        for l, lon in enumerate(longitudes):
3768
3769            vertices[l,k] = i
3770
3771            zone, easting, northing = redfearn(lat,lon)
3772
3773            msg = 'Zone boundary crossed at longitude = %s' %lon
3774            #assert zone == refzone, msg
3775            #print '%7.2f %7.2f %8.2f %8.2f' %(lon, lat, easting, northing)
3776            x[i] = easting
3777            y[i] = northing
3778            i += 1
3779
3780
3781    #Construct 2 triangles per 'rectangular' element
3782    volumes = []
3783    for l in range(number_of_longitudes-1):    #X direction
3784        for k in range(number_of_latitudes-1): #Y direction
3785            v1 = vertices[l,k+1]
3786            v2 = vertices[l,k]
3787            v3 = vertices[l+1,k+1]
3788            v4 = vertices[l+1,k]
3789
3790            #Note, this is different to the ferret2sww code
3791            #since the order of the lats is reversed.
3792            volumes.append([v1,v3,v2]) #Upper element
3793            volumes.append([v4,v2,v3]) #Lower element
3794
3795    volumes = array(volumes)
3796
3797    geo_ref = Geo_reference(refzone,min(x),min(y))
3798    geo_ref.write_NetCDF(outfile)
3799
3800    # This will put the geo ref in the middle
3801    #geo_ref = Geo_reference(refzone,(max(x)+min(x))/2.0,(max(x)+min(y))/2.)
3802
3803
3804    if verbose:
3805        print '------------------------------------------------'
3806        print 'More Statistics:'
3807        print '  Extent (x/y):'
3808        print '    x in [%f, %f], len(x) == %d'\
3809              %(min(x), max(x),
3810                len(x))
3811        print '    y in [%f, %f], len(y) == %d'\
3812              %(min(y), max(y),
3813                len(y))
3814        print 'geo_ref: ',geo_ref
3815
3816    z = resize(bath_grid,outfile.variables['z'][:].shape)
3817    outfile.variables['x'][:] = x - geo_ref.get_xllcorner()
3818    outfile.variables['y'][:] = y - geo_ref.get_yllcorner()
3819    outfile.variables['z'][:] = z
3820    outfile.variables['elevation'][:] = z  #FIXME HACK
3821    outfile.variables['volumes'][:] = volumes.astype(Int32) #On Opteron 64
3822
3823    stage = outfile.variables['stage']
3824    xmomentum = outfile.variables['xmomentum']
3825    ymomentum = outfile.variables['ymomentum']
3826
3827    outfile.variables['time'][:] = times   #Store time relative
3828
3829    if verbose: print 'Converting quantities'
3830    n = number_of_times
3831    for j in range(number_of_times):
3832        # load in files
3833        elevation_meta, elevation_grid = \
3834           _read_asc(elevation_dir + os.sep + elevation_files[j])
3835
3836        _, u_momentum_grid =  _read_asc(ucur_dir + os.sep + ucur_files[j])
3837        _, v_momentum_grid =  _read_asc(vcur_dir + os.sep + vcur_files[j])
3838
3839        #cut matrix to desired size
3840        elevation_grid = elevation_grid[kmin:kmax,lmin:lmax]
3841        u_momentum_grid = u_momentum_grid[kmin:kmax,lmin:lmax]
3842        v_momentum_grid = v_momentum_grid[kmin:kmax,lmin:lmax]
3843       
3844        # handle missing values
3845        missing = (elevation_grid == elevation_meta['NODATA_value'])
3846        if sometrue (missing):
3847            if fail_on_NaN:
3848                msg = 'File %s contains missing values'\
3849                      %(elevation_files[j])
3850                raise DataMissingValuesError, msg
3851            else:
3852                elevation_grid = elevation_grid*(missing==0) + \
3853                                 missing*elevation_NaN_filler
3854
3855
3856        if verbose and j%((n+10)/10)==0: print '  Doing %d of %d' %(j, n)
3857        i = 0
3858        for k in range(number_of_latitudes):      #Y direction
3859            for l in range(number_of_longitudes): #X direction
3860                w = zscale*elevation_grid[k,l] + mean_stage
3861                stage[j,i] = w
3862                h = w - z[i]
3863                xmomentum[j,i] = u_momentum_grid[k,l]*h
3864                ymomentum[j,i] = v_momentum_grid[k,l]*h
3865                i += 1
3866    outfile.close()
3867
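# Example usage (an illustrative sketch; all directory and file names are
# hypothetical):
#
#     asc_csiro2sww('bath_dir', 'elevation_dir', 'ucur_dir', 'vcur_dir',
#                   'boundary.sww',
#                   minlat=-34.5, maxlat=-34.0,
#                   minlon=150.5, maxlon=151.0,
#                   verbose=True)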
3868def _get_min_max_indexes(latitudes_ref,longitudes_ref,
3869                        minlat=None, maxlat=None,
3870                        minlon=None, maxlon=None):
3871    """
3872    return max, min indexes (for slicing) of the lat and long arrays to cover the area
3873    specified with min/max lat/long
3874
3875    Think of the latitudes and longitudes describing a 2d surface.
3876    The area returned is, if possible, just big enough to cover the
3877    inputed max/min area. (This will not be possible if the max/min area
3878    has a section outside of the latitudes/longitudes area.)
3879
3880    asset  longitudes are sorted,
3881    long - from low to high (west to east, eg 148 - 151)
3882    assert latitudes are sorted, ascending or decending
3883    """
3884    latitudes = latitudes_ref[:]
3885    longitudes = longitudes_ref[:]
3886
3887    latitudes = ensure_numeric(latitudes)
3888    longitudes = ensure_numeric(longitudes)
3889   
3890    assert allclose(sort(longitudes), longitudes)
3891   
3892    lat_ascending = True
3893    if not allclose(sort(latitudes), latitudes):
3894        lat_ascending = False
3895        # reverse order of lat, so it's in ascending order         
3896        latitudes = latitudes[::-1]
3897        assert allclose(sort(latitudes), latitudes)
3898    #print "latitudes  in funct", latitudes
3899   
3900    largest_lat_index = len(latitudes)-1
3901    #Cut out a smaller extent.
3902    if minlat is None:
3903        lat_min_index = 0
3904    else:
3905        lat_min_index = searchsorted(latitudes, minlat)-1
3906        if lat_min_index < 0:
3907            lat_min_index = 0
3908
3909
3910    if maxlat is None:
3911        lat_max_index = largest_lat_index #len(latitudes)
3912    else:
3913        lat_max_index = searchsorted(latitudes, maxlat)
3914        if lat_max_index > largest_lat_index:
3915            lat_max_index = largest_lat_index
3916
3917    if minlon is None:
3918        lon_min_index = 0
3919    else:
3920        lon_min_index = searchsorted(longitudes, minlon)-1
3921        if lon_min_index < 0:
3922            lon_min_index = 0
3923
3924    if maxlon is None:
3925        lon_max_index = len(longitudes)
3926    else:
3927        lon_max_index = searchsorted(longitudes, maxlon)
3928
3929    # Reversing the indexes, if the lat array is descending
3930    if lat_ascending is False:
3931        lat_min_index, lat_max_index = largest_lat_index - lat_max_index , \
3932                                       largest_lat_index - lat_min_index
3933    lat_max_index = lat_max_index + 1 # taking into account how slicing works
3934    lon_max_index = lon_max_index + 1 # taking into account how slicing works
3935
3936    return lat_min_index, lat_max_index, lon_min_index, lon_max_index
3937
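# Example (illustrative): descending latitudes, as in the CSIRO grids:
#
#     lats = [-34.0, -34.5, -35.0, -35.5]
#     lons = [150.0, 150.5, 151.0]
#     kmin, kmax, lmin, lmax = _get_min_max_indexes(
#         lats, lons, minlat=-35.2, maxlat=-34.6,
#         minlon=150.6, maxlon=150.9)
#     # (kmin, kmax, lmin, lmax) == (1, 4, 1, 3), so lats[1:4] and
#     # lons[1:3] just cover the requested area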
3938
3939def _read_asc(filename, verbose=False):
3940    """Read esri file from the following ASCII format (.asc)
3941
3942    Example:
3943
3944    ncols         3121
3945    nrows         1800
3946    xllcorner     722000
3947    yllcorner     5893000
3948    cellsize      25
3949    NODATA_value  -9999
3950    138.3698 137.4194 136.5062 135.5558 ..........
3951
3952    """
3953
3954    datafile = open(filename)
3955
3956    if verbose: print 'Reading DEM from %s' %(filename)
3957    lines = datafile.readlines()
3958    datafile.close()
3959
3960    if verbose: print 'Got', len(lines), ' lines'
3961
3962    ncols = int(lines.pop(0).split()[1].strip())
3963    nrows = int(lines.pop(0).split()[1].strip())
3964    xllcorner = float(lines.pop(0).split()[1].strip())
3965    yllcorner = float(lines.pop(0).split()[1].strip())
3966    cellsize = float(lines.pop(0).split()[1].strip())
3967    NODATA_value = float(lines.pop(0).split()[1].strip())
3968
3969    assert len(lines) == nrows
3970
3971    #Store data
3972    grid = []
3973
3974    n = len(lines)
3975    for i, line in enumerate(lines):
3976        cells = line.split()
3977        assert len(cells) == ncols
3978        grid.append(array([float(x) for x in cells]))
3979    grid = array(grid)
3980
3981    return {'xllcorner':xllcorner,
3982            'yllcorner':yllcorner,
3983            'cellsize':cellsize,
3984            'NODATA_value':NODATA_value}, grid
3985
3986
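# Example usage (an illustrative sketch; 'small.asc' is a hypothetical
# esri ascii file with a 2x3 grid):
#
#     metadata, grid = _read_asc('small.asc')
#     print metadata['cellsize']   # e.g. 25.0
#     print grid.shape             # (2, 3) == (nrows, ncols)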
3987
3988    ####  URS 2 SWW  ###
3989
3990lon_name = 'LON'
3991lat_name = 'LAT'
3992time_name = 'TIME'
3993precision = Float # So if we want to change the precision its done here       
3994class Write_nc:
3995    """
3996    Write an nc file.
3997   
3998    Note, this should be checked to meet cdc netcdf conventions for gridded
3999    data. http://www.cdc.noaa.gov/cdc/conventions/cdc_netcdf_standard.shtml
4000   
4001    """
4002    def __init__(self,
4003                 quantity_name,
4004                 file_name,
4005                 time_step_count,
4006                 time_step,
4007                 lon,
4008                 lat):
4009        """
4010        time_step_count is the number of time steps.
4011        time_step is the time step size
4012       
4013        pre-condition: quantity_name must be 'HA', 'UA' or 'VA'.
4014        """
4015        self.quantity_name = quantity_name
4016        quantity_units = {'HA':'CENTIMETERS',
4017                              'UA':'CENTIMETERS/SECOND',
4018                              'VA':'CENTIMETERS/SECOND'}       
4019       
4020        multiplier_dic = {'HA':100.0, # To convert from m to cm
4021                              'UA':100.0,  #  m/s to cm/sec
4022                              'VA':-100.0}  # MUX files have positive x in the
4023        # Southern direction.  This corrects for it, when writing nc files.
4024       
4025        self.quantity_multiplier =  multiplier_dic[self.quantity_name]
4026       
4027        #self.file_name = file_name
4028        self.time_step_count = time_step_count
4029        self.time_step = time_step
4030
4031        # NetCDF file definition
4032        self.outfile = NetCDFFile(file_name, 'w')
4033        outfile = self.outfile       
4034
4035        #Create new file
4036        nc_lon_lat_header(outfile, lon, lat)
4037   
4038        # TIME
4039        outfile.createDimension(time_name, None)
4040        outfile.createVariable(time_name, precision, (time_name,))
4041
4042        #QUANTITY
4043        outfile.createVariable(self.quantity_name, precision,
4044                               (time_name, lat_name, lon_name))
4045        outfile.variables[self.quantity_name].missing_value=-1.e+034
4046        outfile.variables[self.quantity_name].units= \
4047                                 quantity_units[self.quantity_name]
4048        outfile.variables[lon_name][:]= ensure_numeric(lon)
4049        outfile.variables[lat_name][:]= ensure_numeric(lat)
4050
4051        #Assume no one will be wanting to read this, while we are writing
4052        #outfile.close()
4053       
4054    def store_timestep(self, quantity_slice):
4055        """
4056        Write a time slice of quantity info
4057        quantity_slice is the data to be stored at this time step
4058        """
4059       
4060        outfile = self.outfile
4061       
4062        # Get the variables
4063        time = outfile.variables[time_name]
4064        quantity = outfile.variables[self.quantity_name]
4065           
4066        i = len(time)
4067
4068        #Store time
4069        time[i] = i*self.time_step #self.domain.time
4070        quantity[i,:] = quantity_slice* self.quantity_multiplier
4071       
4072    def close(self):
4073        self.outfile.close()
4074
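# Example usage (an illustrative sketch; the file name and values are
# hypothetical):
#
#     lon = [150.0, 150.5, 151.0]
#     lat = [-34.0, -34.5]
#     nc = Write_nc('HA', 'ha_example.nc', time_step_count=2,
#                   time_step=60.0, lon=lon, lat=lat)
#     for i in range(2):
#         # one (lat x lon) slice of wave height in metres;
#         # store_timestep converts to centimetres
#         nc.store_timestep(zeros((len(lat), len(lon)), Float))
#     nc.close()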
4075def urs2sww(basename_in='o', basename_out=None, verbose=False,
4076            remove_nc_files=True,
4077            minlat=None, maxlat=None,
4078            minlon= None, maxlon=None,
4079            mint=None, maxt=None,
4080            mean_stage=0,
4081            origin = None,
4082            zscale=1,
4083            fail_on_NaN=True,
4084            NaN_filler=0,
4085            elevation=None):
4086    """
4087    Convert URS C binary format for wave propagation to
4088    sww format native to abstract_2d_finite_volumes.
4089
4090    Specify only basename_in and read files of the form
4091    basefilename_waveheight-z-mux, basefilename_velocity-e-mux and
4092    basefilename_velocity-n-mux containing relative height,
4093    x-velocity and y-velocity, respectively.
4094
4095    Also convert latitude and longitude to UTM. All coordinates are
4096    assumed to be given in the GDA94 datum. The latitude and longitude
4097    information is for  a grid.
4098
4099    min's and max's: If omitted - full extent is used.
4100    To include a value min may equal it, while max must exceed it.
4101    Lat and lon are assumed to be in decimal degrees.
4102    NOTE: minlon is the most east boundary.
4103   
4104    origin is a 3-tuple with geo referenced
4105    UTM coordinates (zone, easting, northing)
4106    It will be the origin of the sww file. This shouldn't be used,
4107    since all of anuga should be able to handle an arbitrary origin.
4108
4109
4110    URS C binary format has data organised as TIME, LONGITUDE, LATITUDE
4111    which means that latitude is the fastest
4112    varying dimension (row major order, so to speak)
4113
4114    In URS C binary the latitudes and longitudes are in ascending order.
4115    """
4116    if basename_out is None:
4117        basename_out = basename_in
4118    files_out = urs2nc(basename_in, basename_out)
4119    ferret2sww(basename_out,
4120               minlat=minlat,
4121               maxlat=maxlat,
4122               minlon=minlon,
4123               maxlon=maxlon,
4124               mint=mint,
4125               maxt=maxt,
4126               mean_stage=mean_stage,
4127               origin=origin,
4128               zscale=zscale,
4129               fail_on_NaN=fail_on_NaN,
4130               NaN_filler=NaN_filler,
4131               inverted_bathymetry=True,
4132               verbose=verbose)
4133    #print "files_out",files_out
4134    if remove_nc_files:
4135        for file_out in files_out:
4136            os.remove(file_out)
4137   
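# Example usage (an illustrative sketch; the basenames and extent are
# hypothetical):
#
#     urs2sww(basename_in='o', basename_out='urs_boundary',
#             minlat=-35.0, maxlat=-34.0,
#             minlon=150.0, maxlon=151.0,
#             mean_stage=0.0, verbose=True)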
4138def urs2nc(basename_in = 'o', basename_out = 'urs'):
4139    """
4140    Convert the 3 urs files to 4 nc files.
4141
4142    The name of the urs file names must be;
4143    [basename_in]_velocity-z-mux
4144    [basename_in]_velocity-e-mux
4145    [basename_in]_waveheight-n-mux
4146   
4147    """
4148   
4149    files_in = [basename_in + WAVEHEIGHT_MUX_LABEL,
4150                basename_in + EAST_VELOCITY_LABEL,
4151                basename_in + NORTH_VELOCITY_LABEL]
4152    files_out = [basename_out+'_ha.nc',
4153                 basename_out+'_ua.nc',
4154                 basename_out+'_va.nc']
4155    quantities = ['HA','UA','VA']
4156
4157    for file_name in files_in:
4158        if os.access(file_name, os.F_OK) == 0 :
4159            msg = 'File %s does not exist or is not accessible' %file_name
4160            raise IOError, msg
4161       
4162    hashed_elevation = None
4163    for file_in, file_out, quantity in map(None, files_in,
4164                                           files_out,
4165                                           quantities):
4166        lonlatdep, lon, lat, depth = _binary_c2nc(file_in,
4167                                         file_out,
4168                                         quantity)
4169        #print "lonlatdep", lonlatdep
4170        if hashed_elevation is None:
4171            elevation_file = basename_out+'_e.nc'
4172            write_elevation_nc(elevation_file,
4173                                lon,
4174                                lat,
4175                                depth)
4176            hashed_elevation = myhash(lonlatdep)
4177        else:
4178            msg = "The elevation information in the mux files is inconsistent"
4179            assert hashed_elevation == myhash(lonlatdep), msg
4180    files_out.append(elevation_file)
4181    return files_out
4182   
4183def _binary_c2nc(file_in, file_out, quantity):
4184    """
4185    Reads in a quantity urs file and writes a quantity nc file.
4186    additionally, returns the depth and lat, long info,
4187    so it can be written to a file.
4188    """
4189    columns = 3 # long, lat , depth
4190    mux_file = open(file_in, 'rb')
4191   
4192    # Number of points/stations
4193    (points_num,)= unpack('i',mux_file.read(4))
4194
4195    # nt, int - Number of time steps
4196    (time_step_count,)= unpack('i',mux_file.read(4))
4197
4198    #dt, float - time step, seconds
4199    (time_step,) = unpack('f', mux_file.read(4))
4200   
4201    msg = "Bad data in the mux file."
4202    if points_num < 0:
4203        mux_file.close()
4204        raise ANUGAError, msg
4205    if time_step_count < 0:
4206        mux_file.close()
4207        raise ANUGAError, msg
4208    if time_step < 0:
4209        mux_file.close()
4210        raise ANUGAError, msg
4211   
4212    lonlatdep = p_array.array('f')
4213    lonlatdep.read(mux_file, columns * points_num)
4214    lonlatdep = array(lonlatdep, typecode=Float)   
4215    lonlatdep = reshape(lonlatdep, (points_num, columns))
4216   
4217    lon, lat, depth = lon_lat2grid(lonlatdep)
4218    lon_sorted = list(lon)
4219    lon_sorted.sort()
4220
4221    if not lon == lon_sorted:
4222        msg = "Longitudes in mux file are not in ascending order"
4223        raise IOError, msg
4224    lat_sorted = list(lat)
4225    lat_sorted.sort()
4226
4227    if not lat == lat_sorted:
4228        msg = "Latitudes in mux file are not in ascending order"
4229        raise IOError, msg
4230    nc_file = Write_nc(quantity,
4231                       file_out,
4232                       time_step_count,
4233                       time_step,
4234                       lon,
4235                       lat)
4236
4237    for i in range(time_step_count):
4238        #Read in a time slice  from mux file 
4239        hz_p_array = p_array.array('f')
4240        hz_p_array.read(mux_file, points_num)
4241        hz_p = array(hz_p_array, typecode=Float)
4242        hz_p = reshape(hz_p, (len(lon), len(lat)))
4243        hz_p = transpose(hz_p) #mux has lat varying fastest, nc has long v.f.
4244
4245        #write time slice to nc file
4246        nc_file.store_timestep(hz_p)
4247    mux_file.close()
4248    nc_file.close()
4249
4250    return lonlatdep, lon, lat, depth
4251   
4252
4253def write_elevation_nc(file_out, lon, lat, depth_vector):
4254    """
4255    Write an nc elevation file.
4256    """
4257   
4258    # NetCDF file definition
4259    outfile = NetCDFFile(file_out, 'w')
4260
4261    #Create new file
4262    nc_lon_lat_header(outfile, lon, lat)
4263   
4264    # ELEVATION
4265    zname = 'ELEVATION'
4266    outfile.createVariable(zname, precision, (lat_name, lon_name))
4267    outfile.variables[zname].units='CENTIMETERS'
4268    outfile.variables[zname].missing_value=-1.e+034
4269
4270    outfile.variables[lon_name][:]= ensure_numeric(lon)
4271    outfile.variables[lat_name][:]= ensure_numeric(lat)
4272
4273    depth = reshape(depth_vector, ( len(lat), len(lon)))
4274    outfile.variables[zname][:]= depth
4275   
4276    outfile.close()
4277   
4278def nc_lon_lat_header(outfile, lon, lat):
4279    """
4280    outfile is the netcdf file handle.
4281    lon - a list/array of the longitudes
4282    lat - a list/array of the latitudes
4283    """
4284   
4285    outfile.institution = 'Geoscience Australia'
4286    outfile.description = 'Converted from URS binary C'
4287   
4288    # Longitude
4289    outfile.createDimension(lon_name, len(lon))
4290    outfile.createVariable(lon_name, precision, (lon_name,))
4291    outfile.variables[lon_name].point_spacing='uneven'
4292    outfile.variables[lon_name].units='degrees_east'
4293    outfile.variables[lon_name].assignValue(lon)
4294
4295
4296    # Latitude
4297    outfile.createDimension(lat_name, len(lat))
4298    outfile.createVariable(lat_name, precision, (lat_name,))
4299    outfile.variables[lat_name].point_spacing='uneven'
4300    outfile.variables[lat_name].units='degrees_north'
4301    outfile.variables[lat_name].assignValue(lat)
4302
4303
4304   
4305def lon_lat2grid(long_lat_dep):
4306    """
4307    given a list of points that are assumed to be on a grid,
4308    return the long's and lat's of the grid.
4309    long_lat_dep is an array where each row is a position.
4310    The first column is longitudes.
4311    The second column is latitudes.
4312
4313    The latitude is the fastest varying dimension - as in mux files.
4314    """
4315    LONG = 0
4316    LAT = 1
4317    QUANTITY = 2
4318
4319    long_lat_dep = ensure_numeric(long_lat_dep, Float)
4320   
4321    num_points = long_lat_dep.shape[0]
4322    this_rows_long = long_lat_dep[0,LONG]
4323
4324    # Count the number of unique latitudes
4325    i = 0
4326    while i < num_points and long_lat_dep[i,LONG] == this_rows_long:
4327        i += 1
4328    # determine the lats and longs from the grid
4329    lat = long_lat_dep[:i, LAT]       
4330    long = long_lat_dep[::i, LONG]
4331   
4332    lenlong = len(long)
4333    lenlat = len(lat)
4334    #print 'len lat', lat, len(lat)
4335    #print 'len long', long, len(long)
4336         
4337    msg = 'Input data is not gridded'     
4338    assert num_points % lenlat == 0, msg
4339    assert num_points % lenlong == 0, msg
4340         
4341    # Test that data is gridded       
4342    for i in range(lenlong):
4343        msg = 'Data is not gridded.  It must be for this operation'
4344        first = i*lenlat
4345        last = first + lenlat
4346               
4347        assert allclose(long_lat_dep[first:last,LAT], lat), msg
4348        assert allclose(long_lat_dep[first:last,LONG], long[i]), msg
4349   
4350   
4351#    print 'range long', min(long), max(long)
4352#    print 'range lat', min(lat), max(lat)
4353#    print 'ref long', min(long_lat_dep[:,0]), max(long_lat_dep[:,0])
4354#    print 'ref lat', min(long_lat_dep[:,1]), max(long_lat_dep[:,1])
4355   
4356   
4357   
4358    msg = 'Out of range latitudes/longitudes'
4359    for l in lat:assert -90 < l < 90 , msg
4360    for l in long:assert -180 < l < 180 , msg
4361
4362    # Changing quantity from lat being the fastest varying dimension to
4363    # long being the fastest varying dimension
4364    # FIXME - make this faster/do this a better way
4365    # use numeric transpose, after reshaping the quantity vector
4366#    quantity = zeros(len(long_lat_dep), Float)
4367    quantity = zeros(num_points, Float)
4368   
4369#    print 'num',num_points
4370    for lat_i, _ in enumerate(lat):
4371        for long_i, _ in enumerate(long):
4372            q_index = lat_i*lenlong+long_i
4373            lld_index = long_i*lenlat+lat_i
4374#            print 'lat_i', lat_i, 'long_i',long_i, 'q_index', q_index, 'lld_index', lld_index
4375            temp = long_lat_dep[lld_index, QUANTITY]
4376            quantity[q_index] = temp
4377           
4378    return long, lat, quantity
4379
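# Example (illustrative): 2 longitudes x 3 latitudes with latitude varying
# fastest, as in mux files:
#
#     lld = array([[150.0, -35.0, 1.0],
#                  [150.0, -34.5, 2.0],
#                  [150.0, -34.0, 3.0],
#                  [151.0, -35.0, 4.0],
#                  [151.0, -34.5, 5.0],
#                  [151.0, -34.0, 6.0]])
#     long, lat, quantity = lon_lat2grid(lld)
#     # long == [150.0, 151.0], lat == [-35.0, -34.5, -34.0] and
#     # quantity == [1.0, 4.0, 2.0, 5.0, 3.0, 6.0], i.e. reordered so
#     # longitude varies fastest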
4380    ####  END URS 2 SWW  ###
4381
4382    #### URS UNGRIDDED 2 SWW ###
4383
4384    ### PRODUCING THE POINTS NEEDED FILE ###
4385LL_LAT = -50.0
4386LL_LONG = 80.0
4387GRID_SPACING = 1.0/60.0
4388LAT_AMOUNT = 4800
4389LONG_AMOUNT = 3600
4390def URS_points_needed_to_file(file_name, boundary_polygon, zone,
4391                              ll_lat=LL_LAT, ll_long=LL_LONG,
4392                              grid_spacing=GRID_SPACING, 
4393                              lat_amount=LAT_AMOUNT, long_amount=LONG_AMOUNT,
4394                              export_csv=False, use_cache=False,
4395                              verbose=False):
4396    """
4397    file_name - name of the urs file produced for David.
4398    boundary_polygon - a list of points that describes a polygon.
4399                      The last point is assumed ot join the first point.
4400                      This is in UTM (lat long would be better though)
4401
4402    ll_lat - lower left latitude, in decimal degrees
4403    ll-long - lower left longitude, in decimal degrees
4404    grid_spacing - in deciamal degrees
4405
4406
4407    Don't add the file extension.  It will be added.
4408    """
4409    geo = URS_points_needed(boundary_polygon, zone, ll_lat, ll_long,
4410                            grid_spacing, 
4411                      lat_amount, long_amount,use_cache, verbose)
4412    if not file_name[-4:] == ".urs":
4413        file_name += ".urs"
4414    geo.export_points_file(file_name)
4415    if export_csv:
4416        if file_name[-4:] == ".urs":
4417            file_name = file_name[:-4] + ".csv"
4418        geo.export_points_file(file_name)
4419
4420def URS_points_needed(boundary_polygon, zone, ll_lat=LL_LAT,
4421                      ll_long=LL_LONG, grid_spacing=GRID_SPACING, 
4422                      lat_amount=LAT_AMOUNT, long_amount=LONG_AMOUNT,
4423                      use_cache=False, verbose=False):
4424    args = (boundary_polygon,
4425                      zone)
4426    kwargs = {'ll_lat': ll_lat,
4427              'll_long': ll_long,
4428              'grid_spacing': grid_spacing,
4429              'lat_amount': lat_amount,
4430              'long_amount': long_amount} 
4431    if use_cache is True:
4432        try:
4433            from anuga.caching import cache
4434        except ImportError:
4435            msg = 'Caching was requested, but caching module '+\
4436                  'could not be imported'
4437            raise ImportError, msg
4438
4439
4440        geo = cache(_URS_points_needed,
4441                  args, kwargs,
4442                  verbose=verbose,
4443                  compression=False)
4444    else:
4445        #I was getting 'got multiple values for keyword argument' errors
4446        #geo = apply(_URS_points_needed, args, kwargs)
4447        geo = _URS_points_needed(boundary_polygon,
4448                      zone, ll_lat,
4449                      ll_long, grid_spacing, 
4450                      lat_amount, long_amount)
4451
4452    return geo   
4453def _URS_points_needed(boundary_polygon,
4454                      zone, ll_lat=LL_LAT,
4455                      ll_long=LL_LONG, grid_spacing=GRID_SPACING, 
4456                      lat_amount=LAT_AMOUNT, long_amount=LONG_AMOUNT):
4457    """
4458
4459    boundary_polygon - a list of points that describes a polygon.
4460                      The last point is assumed ot join the first point.
4461                      This is in UTM (lat long would be better though)
4462
4463    ll_lat - lower left latitude, in decimal degrees
4464    ll-long - lower left longitude, in decimal degrees
4465    grid_spacing - in deciamal degrees
4466
4467    """
4468   
4469    from sets import ImmutableSet
4470   
4471    msg = "grid_spacing can not be zero"
4472    assert not grid_spacing ==0, msg
4473    a = boundary_polygon
4474    # List of segments.  Each segment is two points.
4475    segs = [i and [a[i-1], a[i]] or [a[len(a)-1], a[0]] for i in range(len(a))]
4476
4477    # convert the segs to Lat's and longs.
4478   
4479    # Don't assume the zone of the segments is the same as the lower left
4480    # corner of the lat long data!!  They can easily be in different zones
4481   
4482    lat_long_set = ImmutableSet()
4483    for seg in segs:
4484        points_lat_long = points_needed(seg, ll_lat, ll_long, grid_spacing, 
4485                      lat_amount, long_amount, zone)
4486        lat_long_set |= ImmutableSet(points_lat_long)
4487    #print "lat_long_set",lat_long_set
4488    geo = Geospatial_data(data_points=list(lat_long_set),
4489                              points_are_lats_longs=True)
4490    return geo
4491   
4492def points_needed(seg, ll_lat, ll_long, grid_spacing, 
4493                  lat_amount, long_amount, zone):
4494    """
4495    return a list of the points, in lats and longs that are needed to
4496    interpolate any point on the segment.
4497    """
4498    from math import sqrt
4499    #print "zone",zone
4500    geo_reference = Geo_reference(zone=zone)
4501    #print "seg",seg
4502    geo = Geospatial_data(seg,geo_reference=geo_reference)
4503    seg_lat_long = geo.get_data_points(as_lat_long=True)
4504    #print "seg_lat_long", seg_lat_long
4505    # 1.415 = 2^0.5, rounded up....
4506    sqrt_2_rounded_up = 1.415
4507    buffer = sqrt_2_rounded_up * grid_spacing
4508   
4509    max_lat = max(seg_lat_long[0][0], seg_lat_long[1][0]) + buffer
4510    max_long = max(seg_lat_long[0][1], seg_lat_long[1][1]) + buffer
4511    min_lat = min(seg_lat_long[0][0], seg_lat_long[1][0]) - buffer
4512    min_long = min(seg_lat_long[0][1], seg_lat_long[1][1]) - buffer
4513
4514    first_row = (min_long - ll_long)/grid_spacing
4515    # To round up
4516    first_row_long = int(round(first_row + 0.5))
4517    #print "first_row", first_row_long
4518
4519    last_row = (max_long - ll_long)/grid_spacing # round down
4520    last_row_long = int(round(last_row))
4521    #print "last_row",last_row _long
4522   
4523    first_row = (min_lat - ll_lat)/grid_spacing
4524    # To round up
4525    first_row_lat = int(round(first_row + 0.5))
4526    #print "first_row", first_row_lat
4527
4528    last_row = (max_lat - ll_lat)/grid_spacing # round down
4529    last_row_lat = int(round(last_row))
4530    #print "last_row",last_row_lat
4531
4532    # to work out the max distance -
4533    # 111120 - horizontal distance between 1 deg latitude.
4534    #max_distance = sqrt_2_rounded_up * 111120 * grid_spacing
4535    max_distance = 157147.4112 * grid_spacing
4536    #print "max_distance", max_distance #2619.12 m for 1 minute
4537    points_lat_long = []
4538    # Create a list of the lat long points to include.
4539    for index_lat in range(first_row_lat, last_row_lat + 1):
4540        for index_long in range(first_row_long, last_row_long + 1):
4541            lat = ll_lat + index_lat*grid_spacing
4542            long = ll_long + index_long*grid_spacing
4543
4544            #filter here to keep good points
4545            if keep_point(lat, long, seg, max_distance):
4546                points_lat_long.append((lat, long)) #must be hashable
4547    #print "points_lat_long", points_lat_long
4548
4549    # Now that we have these points, lets throw ones out that are too far away
4550    return points_lat_long
4551
4552def keep_point(lat, long, seg, max_distance):
4553    """
4554    seg is two points, UTM
4555    """
4556    from math import sqrt
4557    _ , x0, y0 = redfearn(lat, long)
4558    x1 = seg[0][0]
4559    y1 = seg[0][1]
4560    x2 = seg[1][0]
4561    y2 = seg[1][1]
4562
4563    x2_1 = x2-x1
4564    y2_1 = y2-y1
4565    d = abs((x2_1)*(y1-y0)-(x1-x0)*(y2_1))/sqrt((x2_1)*(x2_1)+(y2_1)*(y2_1))
4566    if d <= max_distance:
4567        return True
4568    else:
4569        return False
4570   
4571    #### CONVERTING UNGRIDDED URS DATA TO AN SWW FILE ####
4572   
4573WAVEHEIGHT_MUX_LABEL = '_waveheight-z-mux'
4574EAST_VELOCITY_LABEL =  '_velocity-e-mux'
4575NORTH_VELOCITY_LABEL =  '_velocity-n-mux' 
4576def urs_ungridded2sww(basename_in='o', basename_out=None, verbose=False,
4577                      mint=None, maxt=None,
4578                      mean_stage=0,
4579                      origin=None,
4580                      hole_points_UTM=None,
4581                      zscale=1):
4582    """   
4583    Convert URS C binary format for wave propagation to
4584    sww format native to abstract_2d_finite_volumes.
4585
4586
4587    Specify only basename_in and read files of the form
4588    basefilename_waveheight-z-mux, basefilename_velocity-e-mux and
4589    basefilename_velocity-n-mux containing relative height,
4590    x-velocity and y-velocity, respectively.
4591
4592    Also convert latitude and longitude to UTM. All coordinates are
4593    assumed to be given in the GDA94 datum. The latitude and longitude
4594    information is assumed to be ungridded (scattered points).
4595
4596    min's and max's: If omitted - full extent is used.
4597    To include a value min and max may equal it.
4598    Lat and lon are assumed to be in decimal degrees.
4599   
4600    origin is a 3-tuple with geo referenced
4601    UTM coordinates (zone, easting, northing)
4602    It will be the origin of the sww file. This shouldn't be used,
4603    since all of anuga should be able to handle an arbitrary origin.
4604    The mux point info is NOT relative to this origin.
4605
4606
4607    URS C binary format has data organised as TIME, LONGITUDE, LATITUDE
4608    which means that latitude is the fastest
4609    varying dimension (row major order, so to speak)
4610
4611    In URS C binary the latitudes and longitudes are in ascending order.
4612
4613    Note, interpolations of the resulting sww file will be different
4614    from results of urs2sww.  This is due to the interpolation
4615    function used, and the different grid structure between urs2sww
4616    and this function.
4617   
4618    Interpolating data that has an underlying gridded source can
4619    easily end up with different values, depending on the underlying
4620    mesh.
4621
4622    consider these 4 points
4623    50  -50
4624
4625    0     0
4626
4627    The grid can be
4628     -
4629    |\|    A
4630     -
4631     or;
4632      -
4633     |/|   B
4634      -
4635      If a point is just below the center of the midpoint, it will have a
4636      +ve value in grid A and a -ve value in grid B.
4637    """ 
4638    from anuga.pmesh.mesh import Mesh, NoTrianglesError
4639
4640    files_in = [basename_in + WAVEHEIGHT_MUX_LABEL,
4641                basename_in + EAST_VELOCITY_LABEL,
4642                basename_in + NORTH_VELOCITY_LABEL]
4643    quantities = ['HA','UA','VA']
4644
4645    # instantiate Urs_points for each of the three mux files.
4646    mux = {}
4647    for quantity, file in map(None, quantities, files_in):
4648        mux[quantity] = Urs_points(file)
4649       
4650    # Could check that the depth is the same. (hashing)
4651
4652    # handle to a mux file to do depth stuff
4653    a_mux = mux[quantities[0]]
4654   
4655    # Convert to utm
4656    lat = a_mux.lonlatdep[:,1]
4657    long = a_mux.lonlatdep[:,0]
4658    points_utm, zone = convert_from_latlon_to_utm( \
4659        latitudes=lat, longitudes=long)
4660    #print "points_utm", points_utm
4661    #print "zone", zone
4662
4663    elevation = a_mux.lonlatdep[:,2] * -1 # depth (positive down) -> elevation
4664   
4665    # grid (create a mesh from the selected points)
4666    # This mesh has a problem.  Triangles are stretched over ungridded areas.
4667    # If these areas could be described as holes in pmesh, that would be great.
4668
4669    # I can't just get the user to select a point in the middle.
4670    # A boundary is needed around these points.
4671    # But if the zone of points is obvious enough auto-segment should do
4672    # a good boundary.
4673    mesh = Mesh()
4674    mesh.add_vertices(points_utm)
4675    mesh.auto_segment(smooth_indents=True, expand_pinch=True)
4676    # To try and avoid alpha shape 'hugging' too much
4677    mesh.auto_segment( mesh.shape.get_alpha()*1.1 )
4678    if hole_points_UTM is not None:
4679        point = ensure_absolute(hole_points_UTM)
4680        mesh.add_hole(point[0], point[1])
4681    try:
4682        mesh.generate_mesh(minimum_triangle_angle=0.0, verbose=False)
4683    except NoTrianglesError:
4684        # This is a bit of a hack, going in and changing the
4685        # data structure.
4686        mesh.holes = []
4687        mesh.generate_mesh(minimum_triangle_angle=0.0, verbose=False)
4688    mesh_dic = mesh.Mesh2MeshList()
4689
4690    #mesh.export_mesh_file(basename_in + '_168.tsh')
4691    #import sys; sys.exit()
4692    # These are the times of the mux file
4693    mux_times = []
4694    for i in range(a_mux.time_step_count):
4695        mux_times.append(a_mux.time_step * i) 
4696    mux_times_start_i, mux_times_fin_i = mux2sww_time(mux_times, mint, maxt)
4697    times = mux_times[mux_times_start_i:mux_times_fin_i]
4698   
4699    if mux_times_start_i == mux_times_fin_i:
4700        # Close the mux files
4701        for quantity, file in map(None, quantities, files_in):
4702            mux[quantity].close()
4703        msg="Due to mint and maxt there's no time info in the boundary SWW."
4704        raise Exception, msg
4705       
4706    # If this raise is removed there are currently no downstream errors
4707           
4708    points_utm=ensure_numeric(points_utm)
4709    #print "mesh_dic['generatedpointlist']", mesh_dic['generatedpointlist']
4710    #print "points_utm", points_utm
4711    assert ensure_numeric(mesh_dic['generatedpointlist']) == \
4712           ensure_numeric(points_utm)
4713   
4714    volumes = mesh_dic['generatedtrianglelist']
4715   
4716    # write sww intro and grid stuff.   
4717    if basename_out is None:
4718        swwname = basename_in + '.sww'
4719    else:
4720        swwname = basename_out + '.sww'
4721
4722    if verbose: print 'Output to ', swwname
4723    outfile = NetCDFFile(swwname, 'w')
4724    # For a different way of doing this, check out tsh2sww
4725    # work out sww_times and the index range this covers
4726    sww = Write_sww()
4727    sww.header(outfile, times, len(volumes), len(points_utm),
4728                         verbose=verbose)
4729    outfile.mean_stage = mean_stage
4730    outfile.zscale = zscale
4731
4732    sww.triangulation(outfile, points_utm, volumes,
4733                            elevation, zone,  new_origin=origin,
4734                            verbose=verbose)
4735   
4736    if verbose: print 'Converting quantities'
4737    j = 0
4738    # Read in a time slice from each mux file and write it to the sww file
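    # Sketch of the conversion below (illustrative numbers): with
    # zscale=1 and mean_stage=0, a wave height ha=0.5 m over
    # elevation=-10 m gives stage=0.5 m and depth h = 0.5 - (-10) = 10.5 m,
    # so xmomentum = ua*10.5 and ymomentum = -va*10.5 (the sign flips
    # because mux files take south as positive).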
4739    for ha, ua, va in map(None, mux['HA'], mux['UA'], mux['VA']):
4740        if j >= mux_times_start_i and j < mux_times_fin_i:
4741            stage = zscale*ha + mean_stage
4742            h = stage - elevation
4743            xmomentum = ua*h
4744            ymomentum = -1*va*h # -1 since in mux files south is positive.
4745            sww.quantities(outfile, 
4746                                     slice_index=j - mux_times_start_i,
4747                                     verbose=verbose,
4748                                     stage=stage,
4749                                     xmomentum=xmomentum,
4750                                     ymomentum=ymomentum)
4751        j += 1
4752    if verbose: sww.verbose_quantities(outfile)
4753    outfile.close()
4756
4757   
4758def mux2sww_time(mux_times, mint, maxt):
4759    """
4760    """
4761
4762    if mint is None:
4763        mux_times_start_i = 0
4764    else:
4765        mux_times_start_i = searchsorted(mux_times, mint)
4766       
4767    if maxt is None:
4768        mux_times_fin_i = len(mux_times)
4769    else:
4770        maxt += 0.5 # so that if you specify a time where there is
4771                    # data, that time will be included
4772        mux_times_fin_i = searchsorted(mux_times, maxt)
4773
4774    return mux_times_start_i, mux_times_fin_i
4775
4776
4777class Write_sww:
4778    from anuga.shallow_water.shallow_water_domain import Domain
4779    sww_quantities = Domain.conserved_quantities
4780    RANGE = '_range'
4781    def __init__(self):
4782        pass
4783   
4784    def header(self,outfile, times, number_of_volumes,
4785                         number_of_points,
4786                         description='Converted from XXX',
4787                         smoothing=True,
4788                         order=1, verbose=False):
4789        """
4790        outfile - the open NetCDF file object that will be written to
4791        times - A list of the time slice times OR a start time
4792        Note, if a list is given the info will be made relative.
4793        number_of_volumes - the number of triangles
4794        """
4795   
4796        outfile.institution = 'Geoscience Australia'
4797        outfile.description = description
4798
4799        #For sww compatibility
4800        if smoothing is True:
4801            # Smoothing to be deprecated
4802            outfile.smoothing = 'Yes'
4803            outfile.vertices_are_stored_uniquely = 'False'
4804        else:
4805            # Smoothing to be deprecated
4806            outfile.smoothing = 'No'
4807            outfile.vertices_are_stored_uniquely = 'True'
4808        outfile.order = order
4809
4810        try:
4811            revision_number = get_revision_number()
4812        except:
4813            revision_number = None
4814        # writing a string so None can be written   
4815        outfile.revision_number = str(revision_number)
4816        #times - A list or array of the time slice times OR a start time
4817        #times = ensure_numeric(times)
4818        #Start time in seconds since the epoch (midnight 1/1/1970)
4819
4820        # This is used to separate a single (start time) number from
4821        # a list or array of time slice times.
4822        if type(times) is list or type(times) is ArrayType: 
4823            number_of_times = len(times)
4824            times = ensure_numeric(times) 
4825            if number_of_times == 0:
4826                starttime = 0
4827            else:
4828                starttime = times[0]
4829                times = times - starttime  #Store relative times
4830        else:
4831            number_of_times = 0
4832            starttime = times
4833            #times = ensure_numeric([])
4834        outfile.starttime = starttime
4835        # dimension definitions
4836        outfile.createDimension('number_of_volumes', number_of_volumes)
4837        outfile.createDimension('number_of_vertices', 3)
4838        outfile.createDimension('numbers_in_range', 2)
4839   
4840        if smoothing is True:
4841            outfile.createDimension('number_of_points', number_of_points)
4842       
4843            # FIXME(Ole): This will cause sww files for parallel domains to
4844            # have ghost nodes stored (but not used by triangles).
4845            # To clean this up, we have to change get_vertex_values and
4846            # friends in quantity.py (but I can't be bothered right now)
4847        else:
4848            outfile.createDimension('number_of_points', 3*number_of_volumes)
4849        outfile.createDimension('number_of_timesteps', number_of_times)
4850
4851        # variable definitions
4852        outfile.createVariable('x', precision, ('number_of_points',))
4853        outfile.createVariable('y', precision, ('number_of_points',))
4854        outfile.createVariable('elevation', precision, ('number_of_points',))
4855        q = 'elevation'
4856        outfile.createVariable(q+Write_sww.RANGE, precision,
4857                               ('numbers_in_range',))
4858        # The values are initially filled with large (10e+36) numbers.
4859        # I'm relying on this feature.  Maybe I shouldn't?
4860        outfile.variables[q+Write_sww.RANGE][1] = \
4861                          -1*outfile.variables[q+Write_sww.RANGE][1]
4862
4863        #FIXME: Backwards compatibility
4864        outfile.createVariable('z', precision, ('number_of_points',))
4865        #################################
4866
4867        outfile.createVariable('volumes', Int, ('number_of_volumes',
4868                                                'number_of_vertices'))
4869       
4870        outfile.createVariable('time', precision,
4871                               ('number_of_timesteps',))
4872       
4873        for q in Write_sww.sww_quantities:
4874            outfile.createVariable(q, precision,
4875                                   ('number_of_timesteps',
4876                                    'number_of_points')) 
4877            outfile.createVariable(q+Write_sww.RANGE, precision,
4878                                   ('numbers_in_range',))
4879            # Initialising the max value to a very small number.
4880            # It assumes that netcdf initialises it to a very large number
4881            outfile.variables[q+Write_sww.RANGE][1] = \
4882                -outfile.variables[q+Write_sww.RANGE][1]
4883           
4884        if type(times) is list or type(times) is ArrayType: 
4885            outfile.variables['time'][:] = times    #Store time relative
4886           
4887        if verbose:
4888            print '------------------------------------------------'
4889            print 'Statistics:'
4890            print '    t in [%f, %f], len(t) == %d'\
4891                  %(min(times.flat), max(times.flat), len(times.flat))
4892
4893       
4894    def triangulation(self,outfile, points_utm, volumes,
4895                                elevation, zone=None, new_origin=None, 
4896                                points_georeference=None, verbose=False):
4897        """
4898       
4899        new_origin - a georeference that the points can be set to. (Maybe
4900        do this before calling this function.)
4901       
4902        points_utm - currently a list or array of the points in UTM.
4903        points_georeference - the georeference of the points_utm
4904       
4905        How about passing new_origin and current_origin.
4906        If you get both, do a conversion from the old to the new.
4907       
4908        If you only get new_origin, the points are absolute,
4909        convert to relative
4910       
4911        if you only get the current_origin the points are relative, store
4912        as relative.
4913       
4914        if you get no georefs create a new georef based on the minimums of
4915        points_utm.  (Another option would be to default to absolute)
4916       
4917        Yes, and this is done in another part of the code.
4918        Probably geospatial.
4919       
4920        If you don't supply either geo_ref, then supply a zone; otherwise
4921        the default zone will be used.
4922       
4923       
4924        Precondition: header has been called.
4927        """
4928       
4929        number_of_points = len(points_utm)   
4930        volumes = array(volumes) 
4931        points_utm = array(points_utm)
4932
4933        # given the two geo_refs and the points, do the stuff
4934        # described in the method header
4935        # if this is needed else where, pull out as a function
4936        points_georeference = ensure_geo_reference(points_georeference)
4937        new_origin = ensure_geo_reference(new_origin)
4938        if new_origin is None and points_georeference is not None:
4939            points = points_utm
4940            geo_ref = points_georeference
4941        else:
4942            if new_origin is None:
4943                new_origin = Geo_reference(zone,min(points_utm[:,0]),
4944                                           min(points_utm[:,1]))
4945            points = new_origin.change_points_geo_ref(points_utm,
4946                                                      points_georeference)
4947            geo_ref = new_origin
4948
4949        # At this stage I need a georef and points
4950        # the points are relative to the georef
4951        geo_ref.write_NetCDF(outfile)
4952   
4953        # This will put the geo ref in the middle
4954        #geo_ref=Geo_reference(refzone,(max(x)+min(x))/2.0,(max(x)+min(y))/2.)
4955       
4956        x =  points[:,0]
4957        y =  points[:,1]
4958        z = outfile.variables['z'][:]
4959   
4960        if verbose:
4961            print '------------------------------------------------'
4962            print 'More Statistics:'
4963            print '  Extent (x/y):'
4964            print '    x in [%f, %f], len(x) == %d'\
4965                  %(min(x), max(x),
4966                    len(x))
4967            print '    y in [%f, %f], len(y) == %d'\
4968                  %(min(y), max(y),
4969                    len(y))
4970            print '    z in [%f, %f], len(z) == %d'\
4971                  %(min(elevation), max(elevation),
4972                    len(elevation))
4973            print 'geo_ref: ',geo_ref
4974            print '------------------------------------------------'
4975           
4976        #z = resize(bath_grid,outfile.variables['z'][:].shape)
4977        outfile.variables['x'][:] = points[:,0] #- geo_ref.get_xllcorner()
4978        outfile.variables['y'][:] = points[:,1] #- geo_ref.get_yllcorner()
4979        outfile.variables['z'][:] = elevation
4980        outfile.variables['elevation'][:] = elevation  #FIXME HACK
4981        outfile.variables['volumes'][:] = volumes.astype(Int32) #On Opteron 64
4982
4983        q = 'elevation'
4984        # This updates the _range values
4985        outfile.variables[q+Write_sww.RANGE][0] = min(elevation)
4986        outfile.variables[q+Write_sww.RANGE][1] = max(elevation)
4987
4988    def quantities(self, outfile, precision=Float,
4989                             slice_index=None, time=None,
4990                             verbose=False, **quant):
4991        """
4992        Write the quantity info.
4993
4994        **quant is extra keyword arguments passed in. These must be
4995          the sww quantities, currently; stage, xmomentum, ymomentum.
4996       
4997        If the time array has already been built, use the slice_index
4998        to specify the index.
4999       
5000        Otherwise, use time to increase the time dimension
5001
5002        Maybe make this general, but the viewer assumes these quantities,
5003        so maybe we don't want it general - unless the viewer is general
5004       
5005        Precondition: triangulation and header have been called.
5008        """
5009
5010        if time is not None:
5011            file_time = outfile.variables['time']
5012            slice_index = len(file_time)
5013            file_time[slice_index] = time   
5014
5015        # Write the conserved quantities from Domain.
5016        # Typically stage,  xmomentum, ymomentum
5017        # other quantities will be ignored, silently.
5018        for q in Write_sww.sww_quantities:
5019            if not quant.has_key(q):
5020                msg = 'SWW file cannot write quantity %s' %q
5021                raise NewQuantity, msg
5022            else:
5023                q_values = quant[q]
5024                outfile.variables[q][slice_index] = q_values.astype(precision)
5025
5026                # This updates the _range values
5027                q_range = outfile.variables[q+Write_sww.RANGE][:]
5028                q_values_min = min(q_values)
5029                if q_values_min < q_range[0]:
5030                    outfile.variables[q+Write_sww.RANGE][0] = q_values_min
5031                q_values_max = max(q_values)
5032                if q_values_max > q_range[1]:
5033                    outfile.variables[q+Write_sww.RANGE][1] = q_values_max
5034
5035    def verbose_quantities(self, outfile):
5036        print '------------------------------------------------'
5037        print 'More Statistics:'
5038        for q in Write_sww.sww_quantities:
5039            print '%s in [%f, %f]' %(q,
5040                                     outfile.variables[q+Write_sww.RANGE][0],
5041                                     outfile.variables[q+Write_sww.RANGE][1])
5043        print '------------------------------------------------'
5044       
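# Hedged usage sketch for Write_sww (all names and values below are
# illustrative only, not from the ANUGA test suite):
#
#     from Numeric import array, Float
#     outfile = NetCDFFile('example.sww', 'w')
#     sww = Write_sww()
#     sww.header(outfile, times=[0.0, 1.0], number_of_volumes=1,
#                number_of_points=3)
#     sww.triangulation(outfile, points_utm=[[0.,0.],[10.,0.],[0.,10.]],
#                       volumes=[[0,1,2]], elevation=[1.,2.,3.], zone=56)
#     q = array([0.0, 0.0, 0.0], Float)
#     sww.quantities(outfile, slice_index=0,
#                    stage=q, xmomentum=q, ymomentum=q)
#     outfile.close()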
5045def obsolete_write_sww_time_slices(outfile, has, uas, vas, elevation,
5046                         mean_stage=0, zscale=1,
5047                         verbose=False):   
5048    #Time stepping
5049    stage = outfile.variables['stage']
5050    xmomentum = outfile.variables['xmomentum']
5051    ymomentum = outfile.variables['ymomentum']
5052
5053    n = len(has)
5054    j=0
5055    for ha, ua, va in map(None, has, uas, vas):
5056        if verbose and j%((n+10)/10)==0: print '  Doing %d of %d' %(j, n)
5057        w = zscale*ha + mean_stage
5058        stage[j] = w
5059        h = w - elevation
5060        xmomentum[j] = ua*h
5061        ymomentum[j] = -1*va*h  # -1 since in mux files south is positive.
5062        j += 1
5063   
5064def urs2txt(basename_in, location_index=None):
5065    """
5066    Not finished or tested
5067    """
5068   
5069    files_in = [basename_in + WAVEHEIGHT_MUX_LABEL,
5070                basename_in + EAST_VELOCITY_LABEL,
5071                basename_in + NORTH_VELOCITY_LABEL]
5072    quantities = ['HA','UA','VA']
5073
5074    d = ","
5075   
5076    # Instantiate Urs_points for each of the three mux files.
5077    mux = {}
5078    for quantity, file in map(None, quantities, files_in):
5079        mux[quantity] = Urs_points(file)
5080       
5081    # Could check that the depth is the same. (hashing)
5082
5083    # handle to a mux file to do depth stuff
5084    a_mux = mux[quantities[0]]
5085   
5086    # Convert to utm
5087    latitudes = a_mux.lonlatdep[:,1]
5088    longitudes = a_mux.lonlatdep[:,0]
5089    points_utm, zone = convert_from_latlon_to_utm( \
5090        latitudes=latitudes, longitudes=longitudes)
5091    #print "points_utm", points_utm
5092    #print "zone", zone
5093    depths = a_mux.lonlatdep[:,2]  # depth in metres, positive down
5094   
5095    fid = open(basename_in + '.txt', 'w')
5096
5097    fid.write("zone: " + str(zone) + "\n")
5098
5099    if location_index is not None:
5100        #Title
5101        li = location_index
5102        fid.write('location_index'+d+'lat'+d+ 'long' +d+ 'Easting' +d+ \
5103                  'Northing' + "\n")
5104        fid.write(str(li) +d+ str(latitudes[li])+d+ \
5105              str(longitudes[li]) +d+ str(points_utm[li][0]) +d+ \
5106              str(points_utm[li][1]) + "\n")
5107
5108    # the non-time dependent stuff
5109    #Title
5110    fid.write('location_index'+d+'lat'+d+ 'long' +d+ 'Easting' +d+ \
5111                  'Northing' +d+ 'depth m' + "\n")
5112    i = 0
5113    for depth, point_utm, lat, long in map(None, depths,
5114                                               points_utm, latitudes,
5115                                               longitudes):
5116       
5117        fid.write(str(i) +d+ str(lat)+d+ str(long) +d+ str(point_utm[0]) +d+ \
5118                  str(point_utm[1]) +d+ str(depth) + "\n")
5119        i +=1
5120    #Time dependent
5121    if location_index is not None:
5122        time_step = a_mux.time_step
5123        i = 0
5124        #Title
5125        fid.write('time' +d+ 'HA depth m'+d+ \
5126                 'UA momentum East x m/sec' +d+ 'VA momentum North y m/sec' \
5127                      + "\n")
5128        for HA, UA, VA in map(None, mux['HA'], mux['UA'], mux['VA']):
5129            fid.write(str(i*time_step) +d+ str(HA[location_index])+d+ \
5130                      str(UA[location_index]) +d+ str(VA[location_index]) \
5131                      + "\n")
5132           
5133            i +=1
5134   
5135class Urs_points:
5136    """
5137    Read the info in URS mux files.
5138
5139    For the quantities, here is how the file names map to
5140    what they mean:
5141    z-mux is height above sea level, m
5142    e-mux is velocity in the Eastern direction, m/s
5143    n-mux is velocity in the Northern direction, m/s
5144    """
5145    def __init__(self,urs_file):
5146        self.iterated = False
5147        columns = 3 # long, lat , depth
5148        mux_file = open(urs_file, 'rb')
5149       
5150        # Number of points/stations
5151        (self.points_num,)= unpack('i',mux_file.read(4))
5152       
5153        # nt, int - Number of time steps
5154        (self.time_step_count,)= unpack('i',mux_file.read(4))
5155        #print "self.time_step_count", self.time_step_count
5156        #dt, float - time step, seconds
5157        (self.time_step,) = unpack('f', mux_file.read(4))
5158        #print "self.time_step", self.time_step
5159        msg = "Bad data in the urs file."
5160        if self.points_num < 0:
5161            mux_file.close()
5162            raise ANUGAError, msg
5163        if self.time_step_count < 0:
5164            mux_file.close()
5165            raise ANUGAError, msg
5166        if self.time_step < 0:
5167            mux_file.close()
5168            raise ANUGAError, msg
5169
5170        # The depth is in metres; it is the distance from the ocean
5171        # surface to the sea bottom.
5172        lonlatdep = p_array.array('f')
5173        lonlatdep.read(mux_file, columns * self.points_num)
5174        lonlatdep = array(lonlatdep, typecode=Float)   
5175        lonlatdep = reshape(lonlatdep, (self.points_num, columns))
5176        #print 'lonlatdep',lonlatdep
5177        self.lonlatdep = lonlatdep
5178       
5179        self.mux_file = mux_file
5180        # check this array
5181
5182    def __iter__(self):
5183        """
5184        iterate over quantity data which is with respect to time.
5185
5186        Note: You can only iterate once over an object
5187       
5188        returns quantity information for each time slice
5189        """
5190        msg =  "You can only interate once over a urs file."
5191        assert not self.iterated, msg
5192        self.iter_time_step = 0
5193        self.iterated = True
5194        return self
5195   
5196    def next(self):
5197        if self.time_step_count == self.iter_time_step:
5198            self.close()
5199            raise StopIteration
5200        #Read in a time slice  from mux file 
5201        hz_p_array = p_array.array('f')
5202        hz_p_array.read(self.mux_file, self.points_num)
5203        hz_p = array(hz_p_array, typecode=Float)
5204        self.iter_time_step += 1
5205       
5206        return hz_p
5207
5208    def close(self):
5209        self.mux_file.close()
5210       
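# Hedged usage sketch for Urs_points (the file name is illustrative):
#
#     urs = Urs_points('example-z-mux')
#     print urs.points_num, urs.time_step_count, urs.time_step
#     for hz in urs:      # one Numeric array of station values per time slice
#         print hz[0]     # the quantity value at the first station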
5211    #### END URS UNGRIDDED 2 SWW ###
5212
5213       
5214def start_screen_catcher(dir_name, myid='', numprocs='', extra_info='',
5215                         print_to_screen=False, verbose=False):
5216    """
5217    Used to store screen output and errors to file. If run on multiple
5218    processes, each processor will have its own output and error file.
5219   
5220    extra_info - a string that can be used to tag the output files,
5221    eg. '_other'
5222    """
5223    import sys
5225    if access(dir_name,W_OK) == 0:
5226        if verbose: print 'Make directory %s' %dir_name
5227        if verbose: print "myid", myid
5228        mkdir (dir_name,0777)
5229    if myid != '':
5230        myid = '_'+str(myid)
5231    if numprocs != '':
5232        numprocs = '_'+str(numprocs)
5233    if extra_info != '':
5234        extra_info = '_'+str(extra_info)
5235    screen_output_name = dir_name + "screen_output%s%s%s.txt" %(myid,
5236                                                                numprocs,
5237                                                                extra_info)
5238    screen_error_name = dir_name + "screen_error%s%s%s.txt" %(myid,
5239                                                              numprocs,
5240                                                              extra_info)
5241    print screen_output_name
5242    #used to catch screen output to file
5243    sys.stdout = Screen_Catcher(screen_output_name)
5244    sys.stderr = Screen_Catcher(screen_error_name)
5245
5246class Screen_Catcher:
5247    """this simply catches the screen output and stores it to file defined by
5248    start_screen_catcher (above)
5249    """
5250   
5251    def __init__(self, filename):
5252        self.filename = filename
5253 
5254        if exists(self.filename):
5255            print 'Old existing file "%s" has been deleted' %(self.filename)
5256            remove(self.filename)
5257
5258    def write(self, stuff):
5259        fid = open(self.filename, 'a')
5260        fid.write(stuff)
5261#        if print_to_screen: print stuff
5262
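# Hedged usage sketch for start_screen_catcher (paths illustrative; note
# that dir_name is concatenated directly, so include a trailing separator):
#
#     start_screen_catcher('output' + sep, myid=0, numprocs=4)
#     print 'hello'   # appended to output/screen_output_0_4.txt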
5263def copy_code_files(dir_name, filename1, filename2):
5264    """Copies "filename1" and "filename2" to "dir_name". Very useful for
5265    information management
5266    filename1 and filename2 are both absolute pathnames   
5267    """
5268
5269    if access(dir_name,F_OK) == 0:
5270        print 'Make directory %s' %dir_name
5271        mkdir (dir_name,0777)
5272    shutil.copy(filename1, dir_name + sep + basename(filename1))
5273    shutil.copy(filename2, dir_name + sep + basename(filename2))
5274#    copy (__file__, project.output_run_time_dir + basename(__file__))
5275    print 'Files %s and %s copied' %(filename1, filename2)
5276
5277def get_data_from_file(filename,separator_value = ','):
5278    """
5279    Read in data information from a file.
5280   
5281    Returns:
5282        header_fields, a list of the fields of the first line, split
5283        on the 'separator_value'
5284       
5285        data, an array (M lines x N data columns) of the file contents
5286        excluding the header
5287       
5288    NOTE: won't deal with columns of different lengths, and there must be
5289    no blank lines at the end.
5290    """
5291   
5292    fid = open(filename)
5293    lines = fid.readlines()
5294   
5295    fid.close()
5296   
5297    header_line = lines[0]
5298    header_fields = header_line.split(separator_value)
5299
5300    #array to store data, number in there is to allow float...
5301    #i'm sure there is a better way!
5302    data=array([],typecode=Float)
5303    data=resize(data,((len(lines)-1),len(header_fields)))
5304#    print 'number of fields',range(len(header_fields))
5305#    print 'number of lines',len(lines), shape(data)
5306#    print'data',data[1,1],header_line
5307
5308    array_number = 0
5309    line_number = 1
5310    while line_number < (len(lines)):
5311        for i in range(len(header_fields)): 
5312            # line_number starts at 1, skipping the header line;
5313            # array_number indexes the data array from 0
5314            fields = lines[line_number].split(separator_value)
5315            #assign to array
5316            data[array_number,i] = float(fields[i])
5317           
5318        line_number = line_number +1
5319        array_number = array_number +1
5320       
5321    return header_fields, data
5322
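# Hedged example for get_data_from_file (file contents illustrative):
# given 'example.csv' containing
#
#     time,stage
#     0.0,1.5
#     1.0,1.6
#
# the call
#
#     header_fields, data = get_data_from_file('example.csv')
#
# yields header_fields == ['time', 'stage\n'] (the last field keeps its
# newline) and data == [[0.0, 1.5], [1.0, 1.6]] as a Numeric Float array.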
5323def store_parameters(verbose=False,**kwargs):
5324    """
5325    Store "kwargs" into a temp csv file, if "completed" is a kwargs csv file is
5326    kwargs[file_name] else it is kwargs[output_dir] + details_temp.csv
5327   
5328    Must have a file_name keyword arg, this is what is writing to.
5329    might be a better way to do this using CSV module Writer and writeDict
5330   
5331    writes file to "output_dir" unless "completed" is in kwargs, then
5332    it writes to "file_name" kwargs
5333
5334    """
5335    import types
5336#    import os
5337   
5338    # Check that kwargs is a dictionary
5339    if type(kwargs) != types.DictType:
5340        raise TypeError
5341   
5342    # is 'completed' in kwargs?
5343    try:
5344        kwargs['completed']
5345        completed=True
5346    except:
5347        completed=False
5348 
5349    #get file name and removes from dict and assert that a file_name exists
5350    if completed:
5351        try:
5352            file = str(kwargs['file_name'])
5353        except:
5354            raise Exception, 'kwargs must have file_name'
5355    else:
5356        #write temp file in output directory
5357        try:
5358            file = str(kwargs['output_dir'])+'detail_temp.csv'
5359        except:
5360            raise Exception, 'kwargs must have output_dir'
5361       
5362    #extracts the header info and the new line info
5363    line=''
5364    header=''
5365    count=0
5366    keys = kwargs.keys()
5367    keys.sort()
5368   
5369    #used the sorted keys to create the header and line data
5370    for k in keys:
5371#        print "%s = %s" %(k, kwargs[k])
5372        header = header+str(k)
5373        line = line+str(kwargs[k])
5374        count+=1
5375        if count <len(kwargs):
5376            header = header+','
5377            line = line+','
5378    header+='\n'
5379    line+='\n'
5380
5381    # checks the header info, if the same, then write, if not create a new file
5382    #try to open!
5383    try:
5384        fid = open(file,"r")
5385        file_header=fid.readline()
5386        fid.close()
5387        if verbose: print 'read file header %s' %file_header
5388       
5389    except:
5390        msg = 'Trying to create new file %s' %file
5391        if verbose: print msg
5392        #tries to open file, maybe directory is bad
5393        try:
5394            fid = open(file,"w")
5395            fid.write(header)
5396            fid.close()
5397            file_header=header
5398        except:
5399            msg = 'cannot create new file %s' %file
5400            raise Exception, msg
5401           
5402    #if header is same or this is a new file
5403    if file_header==str(header):
5404        fid=open(file,"a")
5405        #write new line
5406        fid.write(line)
5407        fid.close()
5408    else:
5409        # Backup plan:
5410        # if the header is different and 'completed' is set, append the
5411        # info to the end of the detail_temp.csv file in the output directory
5412        file = str(kwargs['output_dir'])+'detail_temp.csv'
5413        fid=open(file,"a")
5414        fid.write(header)
5415        fid.write(line)
5416        fid.close()
5417        if verbose: print 'file',file_header.strip('\n')
5418        if verbose: print 'head',header.strip('\n')
5419        if file_header.strip('\n')==str(header): print 'they equal'
5420        msg = 'WARNING: File header does not match input info; the input variables have changed. Suggest changing the file name.'
5421        print msg
5422
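# Hedged usage sketch for store_parameters (keywords illustrative):
#
#     store_parameters(output_dir='results' + sep, run_id=7, alpha=0.1)
#     # appends a row to results/detail_temp.csv, creating the file
#     # (with a sorted, comma-separated header line) on first use
#     store_parameters(file_name='results' + sep + 'final.csv',
#                      completed=True, run_id=7, alpha=0.1)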
5423
5424
5425
5426
5427def get_maximum_inundation_elevation(filename,
5428                                     polygon=None,
5429                                     time_interval=None,
5430                                     verbose=False):
5431   
5432    """Return highest elevation where depth > 0
5433   
5434    Usage:
5435    max_runup = get_maximum_inundation_elevation(filename,
5436                                                 polygon=None,
5437                                                 time_interval=None,
5438                                                 verbose=False)
5439
5440    filename is a NetCDF sww file containing ANUGA model output.   
5441    Optional arguments polygon and time_interval restrict the maximum
5442    runup calculation
5443    to points that lie within the specified polygon and time interval.
5444
5445    If no inundation is found within polygon and time_interval the return value
5446    is None signifying "No Runup" or "Everything is dry".
5447
5448    See general function get_maximum_inundation_data for details.
5449   
5450    """
5451   
5452    runup, _ = get_maximum_inundation_data(filename,
5453                                           polygon=polygon,
5454                                           time_interval=time_interval,
5455                                           verbose=verbose)
5456    return runup
5457
5458
5459
5460
5461def get_maximum_inundation_location(filename,
5462                                    polygon=None,
5463                                    time_interval=None,
5464                                    verbose=False):
5465    """Return location of highest elevation where h > 0
5466   
5467   
5468    Usage:
5469    max_runup_location = get_maximum_inundation_location(filename,
5470                                                         polygon=None,
5471                                                         time_interval=None,
5472                                                         verbose=False)
5473
5474    filename is a NetCDF sww file containing ANUGA model output.
5475    Optional arguments polygon and time_interval restrict the maximum
5476    runup calculation
5477    to points that lie within the specified polygon and time interval.
5478
5479    If no inundation is found within polygon and time_interval the return value
5480    is None signifying "No Runup" or "Everything is dry".
5481
5482    See general function get_maximum_inundation_data for details.
5483    """
5484   
5485    _, max_loc = get_maximum_inundation_data(filename,
5486                                             polygon=polygon,
5487                                             time_interval=time_interval,
5488                                             verbose=verbose)
5489    return max_loc
5490   
5491
5492
5493def get_maximum_inundation_data(filename, polygon=None, time_interval=None,
5494                                use_centroid_values=False,
5495                                verbose=False):
5496    """Compute maximum run up height from sww file.
5497
5498
5499    Usage:
5500    runup, location = get_maximum_inundation_data(filename,
5501                                                  polygon=None,
5502                                                  time_interval=None,
5503                                                  verbose=False)
5504   
5505
5506    Algorithm is as in get_maximum_inundation_elevation from
5507    shallow_water_domain
5508    except that this function works with the sww file and computes the maximal
5509    runup height over multiple timesteps.
5510   
5511    Optional arguments polygon and time_interval restrict the
5512    maximum runup calculation
5513    to points that lie within the specified polygon and time interval.
5514    Polygon is
5515    assumed to be in (absolute) UTM coordinates in the same zone as domain.
5516
5517    If no inundation is found within polygon and time_interval the return value
5518    is None signifying "No Runup" or "Everything is dry".
5519    """
5520
5521    # We are using nodal values here as that is what is stored in sww files.
5522
5523    # Water depth below which it is considered to be 0 in the model
5524    # FIXME (Ole): Allow this to be specified as a keyword argument as well
5525
5526    from anuga.utilities.polygon import inside_polygon   
5527    from anuga.config import minimum_allowed_height
5528    from Scientific.IO.NetCDF import NetCDFFile
5529
5530    dir, base = os.path.split(filename)
5531           
5532    iterate_over = get_all_swwfiles(dir,base)
5533   
5534    # Read sww file
5535    if verbose: 
5536        print 'Reading from %s' %filename
5537        # FIXME: Use general swwstats (when done)
5538   
5539    maximal_runup = None
5540    maximal_runup_location = None
5541   
5542    for file_index, swwfile in enumerate(iterate_over):
5543       
5544        # Read sww file
5545        filename = join(dir,swwfile+'.sww')
5546       
5547        if verbose: 
5548            print 'Reading from %s' %filename
5549            # FIXME: Use general swwstats (when done)
5550               
5551        fid = NetCDFFile(filename)
5552   
5553        # Get geo_reference
5554        # sww files don't have to have a geo_ref
5555        try:
5556            geo_reference = Geo_reference(NetCDFObject=fid)
5557        except AttributeError, e:
5558            geo_reference = Geo_reference() # Default georef object
5559           
5560        xllcorner = geo_reference.get_xllcorner()
5561        yllcorner = geo_reference.get_yllcorner()
5562        zone = geo_reference.get_zone()
5563       
5564        # Get extent
5565        volumes = fid.variables['volumes'][:]   
5566        x = fid.variables['x'][:] + xllcorner
5567        y = fid.variables['y'][:] + yllcorner
5568   
5569   
5570        # Get the relevant quantities
5571        elevation = fid.variables['elevation'][:] 
5572        stage = fid.variables['stage'][:]
5573   
5574   
5575        # Here's where one could convert nodal information to centroid
5576        # information
5577        # but is probably something we need to write in C.
5578        # Here's a Python thought which is NOT finished!!!
5579        if use_centroid_values is True:
5580            x = get_centroid_values(x, volumes)
5581            y = get_centroid_values(y, volumes)   
5582            elevation = get_centroid_values(elevation, volumes)   
5583   
5584   
5585        # Spatial restriction
5586        if polygon is not None:
5587            msg = 'polygon must be a sequence of points.'
5588            assert len(polygon[0]) == 2, msg
5589            # FIXME (Ole): Make a generic polygon input check in polygon.py
5590            # and call it here
5591           
5592            points = concatenate((x[:,NewAxis], y[:,NewAxis]), axis=1)
5593   
5594            point_indices = inside_polygon(points, polygon)
5595   
5596            # Restrict quantities to polygon
5597            elevation = take(elevation, point_indices)
5598            stage = take(stage, point_indices, axis=1)
5599   
5600            # Get info for location of maximal runup
5601            points_in_polygon = take(points, point_indices)
5602            x = points_in_polygon[:,0]
5603            y = points_in_polygon[:,1]       
5604        else:
5605            # Take all points
5606            point_indices = arange(len(x))
5607           
5608   
5609        # Temporal restriction
5610        time = fid.variables['time'][:]
5611        all_timeindices = arange(len(time))       
5612        if time_interval is not None:
5613           
5614            msg = 'time_interval must be a sequence of length 2.'
5615            assert len(time_interval) == 2, msg
5616            msg = 'time_interval %s must not be decreasing.' %(time_interval)
5617            assert time_interval[1] >= time_interval[0], msg
5618           
5619            msg = 'Specified time interval [%.8f:%.8f]' %tuple(time_interval)
5620            msg += ' does not match model time interval: [%.8f, %.8f]\n'\
5621                   %(time[0], time[-1])
5622            if time_interval[1] < time[0]: raise ValueError(msg)
5623            if time_interval[0] > time[-1]: raise ValueError(msg)
5624   
5625            # Take time indices corresponding to interval (& is bitwise AND)
5626            timesteps = compress((time_interval[0] <= time) & (time <= time_interval[1]),
5627                                 all_timeindices)
5628   
5629   
5630            msg = 'time_interval %s did not include any model timesteps.' %(time_interval)       
5631            assert len(timesteps) > 0, msg
5632   
5633   
5634        else:
5635            # Take them all
5636            timesteps = all_timeindices
5637       
5638   
5639        fid.close()
5640   
5641        # Compute maximal runup for each timestep
5642        #maximal_runup = None
5643        #maximal_runup_location = None
5644        #maximal_runups = [None]
5645        #maximal_runup_locations = [None]
5646       
5647        for i in timesteps:
5648            if use_centroid_values is True:
5649                stage_i = get_centroid_values(stage[i,:], volumes)   
5650            else:
5651                stage_i = stage[i,:]
5652               
5653            depth = stage_i  - elevation
5654       
5655            # Get wet nodes i.e. nodes with depth>0 within given region and timesteps
5656            wet_nodes = compress(depth > minimum_allowed_height, arange(len(depth)))
5657   
5658            if len(wet_nodes) == 0:
5659                runup = None
5660            else:   
5661                # Find maximum elevation among wet nodes
5662                wet_elevation = take(elevation, wet_nodes)
5663   
5664                runup_index = argmax(wet_elevation)
5665                runup = max(wet_elevation)
5666                assert wet_elevation[runup_index] == runup # Must always be True
5667            if runup > maximal_runup:
5668                maximal_runup = runup      # This works even if maximal_runup is None
5669                #print "NEW RUNUP",runup
5670   
5671                # Record location
5672                wet_x = take(x, wet_nodes)
5673                wet_y = take(y, wet_nodes)           
5674                maximal_runup_location = [wet_x[runup_index], wet_y[runup_index]]
5675   
5676    #print 'maximal_runup, maximal_runup_location',maximal_runup, maximal_runup_location
5677    return maximal_runup, maximal_runup_location
5678
5679def get_all_swwfiles(look_in_dir='',base_name='',verbose=False):
5680    '''
5681    Finds all the sww files in "look_in_dir" whose names contain "base_name".
5682    Will accept base_name with or without the extension ".sww"
5683   
5684    Returns: a list of strings
5685       
5686    Usage:     iterate_over = get_all_swwfiles(dir, name)
5687    then
5688               for swwfile in iterate_over:
5689                   do stuff
5690                   
5691    Check "export_grids" and "get_maximum_inundation_data" for examples
5692    '''
5693   
5694    #plus tests the extension
5695    name, extension = os.path.splitext(base_name)
5696
5697    if extension != '' and extension != '.sww':
5698        msg = 'file %s %s must be a NetCDF sww file!' %(base_name, extension)
5699        raise IOError, msg
5700
5701    if look_in_dir == "":
5702        look_in_dir = "." # Unix compatibility
5703   
5704    dir_ls = os.listdir(look_in_dir)
5705    #print 'dir_ls',dir_ls, base
5706    iterate_over = [x[:-4] for x in dir_ls if name in x and x[-4:] == '.sww']
5707    if len(iterate_over) == 0:
5708        msg = 'No files of the base name %s'\
5709              %(name)
5710        raise IOError, msg
5711    if verbose: print 'iterate over %s' %(iterate_over)
5712
5713    return iterate_over
5714
5715def get_all_files_with_extension(look_in_dir='',base_name='',extension='.sww',verbose=False):
5716    '''
5717    Finds all the files with "extension" in "look_in_dir" whose names contain "base_name".
5718   
5719   
5720    Returns: a list of strings
5721       
5722    Usage:     iterate_over = get_all_files_with_extension(dir, name, '.sww')
5723    then
5724               for swwfile in iterate_over:
5725                   do stuff
5726                   
5727    Check "export_grids" and "get_maximum_inundation_data" for examples
5728    '''
5729   
5730    #plus tests the extension
5731    name, ext = os.path.splitext(base_name)
5732#    print 'look_in_dir',look_in_dir
5733
5734    if ext != '' and ext != extension:
5735        msg = 'base_name %s must be a file with the %s extension!' %(base_name, extension)
5736        raise IOError, msg
5737
5738    if look_in_dir == "":
5739        look_in_dir = "." # Unix compatibility
5740#    print 'look_in_dir',look_in_dir, getcwd()
5741    dir_ls = os.listdir(look_in_dir)
5742    #print 'dir_ls',dir_ls, base_name
5743    iterate_over = [x[:-4] for x in dir_ls if name in x and x[-4:] == extension]
5744    if len(iterate_over) == 0:
5745        msg = 'No files of the base name %s in %s'\
5746              %(name, look_in_dir)
5747        raise IOError, msg
5748    if verbose: print 'iterate over %s' %(iterate_over)
5749
5750    return iterate_over
5751
5752def get_all_directories_with_name(look_in_dir='',base_name='',verbose=False):
5753    '''
5754    Finds all the directories in "look_in_dir" whose names contain "base_name".
5755   
5756   
5757    Returns: a list of strings
5758       
5759    Usage:     iterate_over = get_all_directories_with_name(dir, name)
5760    then
5761               for swwfile in iterate_over:
5762                   do stuff
5763                   
5764    Check "export_grids" and "get_maximum_inundation_data" for examples
5765    '''
5766   
5768
5769    if look_in_dir == "":
5770        look_in_dir = "." # Unix compatibility
5771#    print 'look_in_dir',look_in_dir
5772    dir_ls = os.listdir(look_in_dir)
5773#    print 'dir_ls',dir_ls
5774    iterate_over = [x for x in dir_ls if base_name in x]
5775    if len(iterate_over) == 0:
5776        msg = 'No directories with the base name %s'\
5777              %(base_name)
5778        raise IOError, msg
5779    if verbose: print 'iterate over %s' %(iterate_over)
5780
5781    return iterate_over
5782
5783
5784#-------------------------------------------------------------
5785if __name__ == "__main__":
5786    # Set umask from config to force the same permissions for all files and
5787    # directories created. (It was noticed that "mpirun" doesn't honour the
5788    # umask set in your .bashrc etc.)
5789    from config import umask
5790    import os 
5791    os.umask(umask)
5792