1"""datamanager.py - input output for AnuGA
2
3
4This module takes care of reading and writing datafiles such as topograhies,
5model output, etc
6
7
8Formats used within AnuGA:
9
10.sww: Netcdf format for storing model output f(t,x,y)
11.tms: Netcdf format for storing time series f(t)
12
13.xya: ASCII format for storing arbitrary points and associated attributes
14.pts: NetCDF format for storing arbitrary points and associated attributes
15
16.asc: ASCII format of regular DEMs as output from ArcView
17.prj: Associated ArcView file giving more meta data for asc format
18.ers: ERMapper header format of regular DEMs for ArcView
19
20.dem: NetCDF representation of regular DEM data
21
22.tsh: ASCII format for storing meshes and associated boundary and region info
23.msh: NetCDF format for storing meshes and associated boundary and region info
24
25.nc: Native ferret NetCDF format
26.geo: Houdinis ascii geometry format (?)
27
28
29A typical dataflow can be described as follows
30
31Manually created files:
32ASC, PRJ:     Digital elevation models (gridded)
33TSH:          Triangular meshes (e.g. created from anuga.pmesh)
34NC            Model outputs for use as boundary conditions (e.g from MOST)
35
36
37AUTOMATICALLY CREATED FILES:
38
39ASC, PRJ  ->  DEM  ->  PTS: Conversion of DEM's to native pts file
40
41NC -> SWW: Conversion of MOST bundary files to boundary sww
42
43PTS + TSH -> TSH with elevation: Least squares fit
44
45TSH -> SWW:  Conversion of TSH to sww viewable using Swollen
46
47TSH + Boundary SWW -> SWW: Simluation using abstract_2d_finite_volumes
48
49"""

import exceptions
class TitleValueError(exceptions.Exception): pass
class DataMissingValuesError(exceptions.Exception): pass
class DataFileNotOpenError(exceptions.Exception): pass
class DataTimeError(exceptions.Exception): pass
class DataDomainError(exceptions.Exception): pass
class NewQuantity(exceptions.Exception): pass



import csv
import os, sys
import shutil
from struct import unpack
import array as p_array
#import time, os
from os import sep, path, remove, mkdir, access, F_OK, W_OK, getcwd


from Numeric import concatenate, array, Float, Int, Int32, resize, sometrue, \
     searchsorted, zeros, allclose, around, reshape, transpose, sort, \
     NewAxis, ArrayType, compress, take, arange, argmax, alltrue, shape

import string

from Scientific.IO.NetCDF import NetCDFFile
#from shutil import copy
from os.path import exists, basename, join


from anuga.coordinate_transforms.redfearn import redfearn, \
     convert_from_latlon_to_utm
from anuga.coordinate_transforms.geo_reference import Geo_reference, \
     write_NetCDF_georeference, ensure_geo_reference
from anuga.geospatial_data.geospatial_data import Geospatial_data,\
     ensure_absolute
from anuga.config import minimum_storable_height as default_minimum_storable_height
from anuga.utilities.numerical_tools import ensure_numeric, mean
from anuga.caching.caching import myhash
from anuga.utilities.anuga_exceptions import ANUGAError
from anuga.shallow_water import Domain
from anuga.abstract_2d_finite_volumes.pmesh2domain import \
     pmesh_to_domain_instance
from anuga.abstract_2d_finite_volumes.util import get_revision_number, \
     remove_lone_verts, sww2timeseries, get_centroid_values
from anuga.load_mesh.loadASCII import export_mesh_file
# formula mappings
quantity_formula = {'momentum':'(xmomentum**2 + ymomentum**2)**0.5',
                    'depth':'stage-elevation',
                    'speed': \
 '(xmomentum**2 + ymomentum**2)**0.5/(stage-elevation+1.e-6/(stage-elevation))'}

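# These formula strings are evaluated against arrays of the named
# quantities (illustrative sketch below; the variable values are
# hypothetical).  The 'speed' entry avoids division by zero by
# regularising the depth h = stage - elevation as h + 1.e-6/h, which
# drives speed to 0 as h -> 0 instead of blowing up.
#
#     stage, elevation = 1.0, 0.2          # depth h = 0.8
#     xmomentum, ymomentum = 0.4, 0.3      # |momentum| = 0.5
#     speed = eval(quantity_formula['speed'])   # approx 0.5/0.8 = 0.625
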
def make_filename(s):
    """Transform argument string into a suitable filename
    """

    s = s.strip()
    s = s.replace(' ', '_')
    s = s.replace('(', '')
    s = s.replace(')', '')
    s = s.replace('__', '_')

    return s


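# For example (illustrative):
#     make_filename(' Flood run (Perth) ')  ->  'Flood_run_Perth'
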
def check_dir(path, verbose=None):
    """Check that specified path exists.
    If path does not exist it will be created if possible

    USAGE:
       check_dir(path, verbose)

    ARGUMENTS:
        path -- Directory
        verbose -- Flag verbose output (default: None)

    RETURN VALUE:
        Verified path including trailing separator

    """

    import os.path

    if sys.platform in ['nt', 'dos', 'win32', 'what else?']:
        unix = 0
    else:
        unix = 1


    if path[-1] != os.sep:
        path = path + os.sep  # Add separator for directories

    path = os.path.expanduser(path) # Expand ~ or ~user in pathname
    if not (os.access(path, os.R_OK | os.W_OK) or path == ''):
        try:
            exitcode = os.mkdir(path)

            # Change access rights if possible
            #
            if unix:
                exitcode = os.system('chmod 775 ' + path)
            else:
                pass  # FIXME: What about access rights under Windows?

            if verbose: print 'MESSAGE: Directory', path, 'created.'

        except:
            print 'WARNING: Directory', path, 'could not be created.'
            if unix:
                path = '/tmp/'
            else:
                path = 'C:' + os.sep

            print 'Using directory %s instead' % path

    return path
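# For example (illustrative):
#     datadir = check_dir('~/anuga_output')  # e.g. '/home/user/anuga_output/'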



def del_dir(path):
    """Recursively delete directory path and all its contents
    """

    import os

    if os.path.isdir(path):
        for file in os.listdir(path):
            X = os.path.join(path, file)


            if os.path.isdir(X) and not os.path.islink(X):
                del_dir(X)
            else:
                try:
                    os.remove(X)
                except:
                    print "Could not remove file %s" % X

        os.rmdir(path)


# ANOTHER OPTION, IF NEEDED IN THE FUTURE, Nick B 7/2007
def rmgeneric(path, __func__, verbose=False):
    ERROR_STR = """Error removing %(path)s, %(error)s """

    try:
        __func__(path)
        if verbose: print 'Removed ', path
    except OSError, (errno, strerror):
        print ERROR_STR % {'path' : path, 'error': strerror}

def removeall(path, verbose=False):

    if not os.path.isdir(path):
        return

    files = os.listdir(path)

    for x in files:
        fullpath = os.path.join(path, x)
        if os.path.isfile(fullpath):
            f = os.remove
            rmgeneric(fullpath, f, verbose)
        elif os.path.isdir(fullpath):
            removeall(fullpath, verbose)
            f = os.rmdir
            rmgeneric(fullpath, f, verbose)

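# For example (illustrative):
#     removeall('scratch_dir', verbose=True)
# empties scratch_dir recursively but leaves the directory itself in place.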


def create_filename(datadir, filename, format, size=None, time=None):

    import os
    #from anuga.config import data_dir

    FN = check_dir(datadir) + filename

    if size is not None:
        FN += '_size%d' % size

    if time is not None:
        FN += '_time%.2f' % time

    FN += '.' + format
    return FN

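# For example (illustrative):
#     create_filename('data', 'run', 'sww', size=4, time=1.5)
#     ->  'data/run_size4_time1.50.sww'   (on Unix)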

def get_files(datadir, filename, format, size):
    """Get all file (names) with given name, size and format
    """

    import glob

    import os
    #from anuga.config import data_dir

    dir = check_dir(datadir)

    # check_dir() returns the directory with a trailing separator and
    # create_filename() writes '_size%d', so match that pattern here.
    pattern = dir + filename + '_size%d*.%s' % (size, format)
    return glob.glob(pattern)



#Generic class for storing output to e.g. visualisation or checkpointing
class Data_format:
    """Generic interface to data formats
    """


    def __init__(self, domain, extension, mode = 'w'):
        msg = "Mode %s must be either 'w' (write), 'r' (read) or 'a' (append)"\
              % mode
        assert mode in ['r', 'w', 'a'], msg

        #Create filename
        self.filename = create_filename(domain.get_datadir(),
                                        domain.get_name(), extension)

        #print 'F', self.filename
        self.timestep = 0
        self.domain = domain



        # Exclude ghosts in case this is a parallel domain
        self.number_of_nodes = domain.number_of_full_nodes
        self.number_of_volumes = domain.number_of_full_triangles
        #self.number_of_volumes = len(domain)




        #FIXME: Should we have a general set_precision function?


#Class for storing output to e.g. visualisation
class Data_format_sww(Data_format):
    """Interface to native NetCDF format (.sww) for storing model output

    There are two kinds of data

    1: Constant data: Vertex coordinates and field values. Stored once
    2: Variable data: Conserved quantities. Stored once per timestep.

    All data is assumed to reside at vertex locations.
    """


    def __init__(self, domain, mode = 'w',\
                 max_size = 2000000000,
                 recursion = False):
        from Scientific.IO.NetCDF import NetCDFFile
        from Numeric import Int, Float, Float32

        self.precision = Float32 #Use single precision for quantities
        if hasattr(domain, 'max_size'):
            self.max_size = domain.max_size #file size max is 2Gig
        else:
            self.max_size = max_size
        self.recursion = recursion
        self.mode = mode

        Data_format.__init__(self, domain, 'sww', mode)

        if hasattr(domain, 'minimum_storable_height'):
            self.minimum_storable_height = domain.minimum_storable_height
        else:
            self.minimum_storable_height = default_minimum_storable_height

        # NetCDF file definition
        fid = NetCDFFile(self.filename, mode)

        if mode == 'w':
            description = 'Output from anuga.abstract_2d_finite_volumes suitable for plotting'
            self.writer = Write_sww()
            self.writer.header(fid, domain.starttime,
                               self.number_of_volumes,
                               self.domain.number_of_full_nodes,
                               description=description,
                               smoothing=domain.smooth,
                               order=domain.default_order)
            if hasattr(domain, 'texture'):
                fid.texture = domain.texture
    #        if domain.geo_reference is not None:
    #            domain.geo_reference.write_NetCDF(fid)

#             fid.institution = 'Geoscience Australia'
#             fid.description = 'Output from anuga.abstract_2d_finite_volumes suitable for plotting'

#             if domain.smooth:
#                 fid.smoothing = 'Yes'
#             else:
#                 fid.smoothing = 'No'

#             fid.order = domain.default_order

#             #Reference point
#             #Start time in seconds since the epoch (midnight 1/1/1970)
#             #FIXME: Use Georef
#             fid.starttime = domain.starttime

#             # dimension definitions
#             fid.createDimension('number_of_volumes', self.number_of_volumes)
#             fid.createDimension('number_of_vertices', 3)

#             if domain.smooth is True:
#                 #fid.createDimension('number_of_points', len(domain.vertexlist))
#                 fid.createDimension('number_of_points', self.domain.number_of_full_nodes)

#                 # FIXME(Ole): This will cause sww files for parallel domains to
#                 # have ghost nodes stored (but not used by triangles).
#                 # To clean this up, we have to change get_vertex_values and friends in
#                 # quantity.py (but I can't be bothered right now)
#             else:
#                 fid.createDimension('number_of_points', 3*self.number_of_volumes)

#             fid.createDimension('number_of_timesteps', None) #extensible

#             # variable definitions
#             fid.createVariable('x', self.precision, ('number_of_points',))
#             fid.createVariable('y', self.precision, ('number_of_points',))
#             fid.createVariable('elevation', self.precision, ('number_of_points',))
#             if domain.geo_reference is not None:
#                 domain.geo_reference.write_NetCDF(fid)

#             #FIXME: Backwards compatibility
#             fid.createVariable('z', self.precision, ('number_of_points',))
#             #################################

#             fid.createVariable('volumes', Int, ('number_of_volumes',
#                                                 'number_of_vertices'))

#             fid.createVariable('time', Float,  # Always use full precision lest two timesteps
#                                              # close to each other may appear as the same step
#                                ('number_of_timesteps',))

#             fid.createVariable('stage', self.precision,
#                                ('number_of_timesteps',
#                                 'number_of_points'))

#             fid.createVariable('xmomentum', self.precision,
#                                ('number_of_timesteps',
#                                 'number_of_points'))

#             fid.createVariable('ymomentum', self.precision,
#                                ('number_of_timesteps',
#                                 'number_of_points'))

        #Close
        fid.close()


    def store_connectivity(self):
        """Specialisation of store_connectivity for NetCDF format

        Writes x,y,z coordinates of the triangles constituting
        the bed elevation.
        """

        from Scientific.IO.NetCDF import NetCDFFile

        from Numeric import concatenate, Int

        domain = self.domain

        #Get NetCDF
        fid = NetCDFFile(self.filename, 'a')  #Open existing file for append

        # Get the variables
        x = fid.variables['x']
        y = fid.variables['y']
        z = fid.variables['elevation']

        volumes = fid.variables['volumes']

        # Get X, Y and bed elevation Z
        Q = domain.quantities['elevation']
        X,Y,Z,V = Q.get_vertex_values(xy=True,
                                      precision=self.precision)

        #
        points = concatenate( (X[:,NewAxis],Y[:,NewAxis]), axis=1 )
        self.writer.triangulation(fid, points,
                                  V.astype(volumes.typecode()),
                                  Z,
                                  points_georeference= \
                                  domain.geo_reference)
        #

#         volumes[:] = V.astype(volumes.typecode())
#         x[:] = X
#         y[:] = Y
#         z[:] = Z

#         #FIXME: Backwards compatibility
#         z = fid.variables['z']
#         z[:] = Z
        ################################



        #Close
        fid.close()

    def store_timestep(self, names):
        """Store time and named quantities to file
        """
        from Scientific.IO.NetCDF import NetCDFFile
        import types
        from time import sleep
        from os import stat

        from Numeric import choose

        #Get NetCDF
        retries = 0
        file_open = False
        while not file_open and retries < 10:
            try:
                fid = NetCDFFile(self.filename, 'a') #Open existing file
            except IOError:
                #This could happen if someone was reading the file.
                #In that case, wait a while and try again
                msg = 'Warning (store_timestep): File %s could not be opened'\
                      % self.filename
                msg += ' - trying step %s again' % self.domain.time
                print msg
                retries += 1
                sleep(1)
            else:
                file_open = True

        if not file_open:
            msg = 'File %s could not be opened for append' % self.filename
            raise DataFileNotOpenError, msg



        #Check to see if the file is already too big:
        time = fid.variables['time']
        i = len(time) + 1
        file_size = stat(self.filename)[6]
        file_size_increase = file_size/i
        if file_size + file_size_increase > self.max_size*(2**self.recursion):
            #In order to get the file name and start time correct,
            #I change the domain.filename and domain.starttime.
            #This is the only way to do this without changing
            #other modules (I think).

            #Write a filename addon that won't break Swollen's reader
            #(10.sww is bad)
            filename_ext = '_time_%s' % self.domain.time
            filename_ext = filename_ext.replace('.', '_')
            #Remember the old filename, then give domain a
            #name with the extension
            old_domain_filename = self.domain.get_name()
            if not self.recursion:
                self.domain.set_name(old_domain_filename + filename_ext)


            #Change the domain starttime to the current time
            old_domain_starttime = self.domain.starttime
            self.domain.starttime = self.domain.time

            #Build a new data_structure.
            next_data_structure = \
                Data_format_sww(self.domain, mode=self.mode,\
                                max_size=self.max_size,\
                                recursion=self.recursion+1)
            if not self.recursion:
                print '    file_size = %s' % file_size
                print '    saving file to %s' % next_data_structure.filename
            #Set up the new data_structure
            self.domain.writer = next_data_structure

            #FIXME - could be cleaner to use domain.store_timestep etc.
            next_data_structure.store_connectivity()
            next_data_structure.store_timestep(names)
            fid.sync()
            fid.close()

            #Restore the old starttime and filename
            self.domain.starttime = old_domain_starttime
            self.domain.set_name(old_domain_filename)
        else:
            self.recursion = False
            domain = self.domain

            # Get the variables
            time = fid.variables['time']
            stage = fid.variables['stage']
            xmomentum = fid.variables['xmomentum']
            ymomentum = fid.variables['ymomentum']
            i = len(time)
            if type(names) not in [types.ListType, types.TupleType]:
                names = [names]

            if 'stage' in names and 'xmomentum' in names and \
                   'ymomentum' in names:

                # Get stage
                Q = domain.quantities['stage']
                A,_ = Q.get_vertex_values(xy = False,
                                          precision = self.precision)
                z = fid.variables['elevation']
                stage = choose(A-z[:] >= self.minimum_storable_height,
                               (z[:], A))

                # Get xmomentum
                Q = domain.quantities['xmomentum']
                xmomentum, _ = Q.get_vertex_values(xy = False,
                                                   precision = self.precision)

                # Get ymomentum
                Q = domain.quantities['ymomentum']
                ymomentum, _ = Q.get_vertex_values(xy = False,
                                                   precision = self.precision)
                self.writer.quantities(fid,
                                       time=self.domain.time,
                                       precision=self.precision,
                                       stage=stage,
                                       xmomentum=xmomentum,
                                       ymomentum=ymomentum)
            else:
                # This is producing an sww that is not standard.
                #Store time
                time[i] = self.domain.time

                for name in names:
                    # Get quantity
                    Q = domain.quantities[name]
                    A,V = Q.get_vertex_values(xy = False,
                                              precision = self.precision)

                    #FIXME: Make this general (see below)
                    if name == 'stage':
                        z = fid.variables['elevation']
                        A = choose(A-z[:] >= self.minimum_storable_height,
                                   (z[:], A))
                        stage[i,:] = A.astype(self.precision)
                    elif name == 'xmomentum':
                        xmomentum[i,:] = A.astype(self.precision)
                    elif name == 'ymomentum':
                        ymomentum[i,:] = A.astype(self.precision)

                    #As in....
                    #eval( name + '[i,:] = A.astype(self.precision)' )
                    #FIXME: But we need a UNIT test for that before
                    #refactoring



            #Flush and close
            fid.sync()
            fid.close()



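# Illustrative driver for the sww writer above (the evolve call is
# schematic; see the Domain class for the real interface):
#
#     sww = get_dataobject(domain)   # Data_format_sww when domain.format == 'sww'
#     sww.store_connectivity()       # static mesh and elevation
#     for t in domain.evolve(yieldstep=1, finaltime=10):
#         sww.store_timestep(['stage', 'xmomentum', 'ymomentum'])
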
#Class for handling checkpoint data
class Data_format_cpt(Data_format):
    """Interface to native NetCDF format (.cpt)
    """


    def __init__(self, domain, mode = 'w'):
        from Scientific.IO.NetCDF import NetCDFFile
        from Numeric import Int, Float

        self.precision = Float #Use full precision

        Data_format.__init__(self, domain, 'cpt', mode)


        # NetCDF file definition
        fid = NetCDFFile(self.filename, mode)

        if mode == 'w':
            #Create new file
            fid.institution = 'Geoscience Australia'
            fid.description = 'Checkpoint data'
            #fid.smooth = domain.smooth
            fid.order = domain.default_order

            # dimension definitions
            fid.createDimension('number_of_volumes', self.number_of_volumes)
            fid.createDimension('number_of_vertices', 3)

            #Store info at all vertices (no smoothing)
            fid.createDimension('number_of_points', 3*self.number_of_volumes)
            fid.createDimension('number_of_timesteps', None) #extensible

            # variable definitions

            #Mesh
            fid.createVariable('x', self.precision, ('number_of_points',))
            fid.createVariable('y', self.precision, ('number_of_points',))


            fid.createVariable('volumes', Int, ('number_of_volumes',
                                                'number_of_vertices'))

            fid.createVariable('time', self.precision,
                               ('number_of_timesteps',))

            #Allocate space for all quantities
            for name in domain.quantities.keys():
                fid.createVariable(name, self.precision,
                                   ('number_of_timesteps',
                                    'number_of_points'))

        #Close
        fid.close()


    def store_checkpoint(self):
        """Write x,y coordinates and connectivity (volumes) of the mesh
        constituting the bed elevation.
        """

        from Scientific.IO.NetCDF import NetCDFFile

        from Numeric import concatenate

        domain = self.domain

        #Get NetCDF
        fid = NetCDFFile(self.filename, 'a')  #Open existing file for append

        # Get the variables
        x = fid.variables['x']
        y = fid.variables['y']

        volumes = fid.variables['volumes']

        # Get X, Y and bed elevation Z
        Q = domain.quantities['elevation']
        X,Y,Z,V = Q.get_vertex_values(xy=True,
                                      precision = self.precision)



        x[:] = X.astype(self.precision)
        y[:] = Y.astype(self.precision)
        #FIXME: No 'z' variable is created in __init__, so the elevation Z
        #cannot be stored here (doing so would raise a NameError):
        #z[:] = Z.astype(self.precision)

        volumes[:] = V

        #Close
        fid.close()


    def store_timestep(self, name):
        """Store time and named quantity to file
        """
        from Scientific.IO.NetCDF import NetCDFFile
        from time import sleep

        #Get NetCDF
        retries = 0
        file_open = False
        while not file_open and retries < 10:
            try:
                fid = NetCDFFile(self.filename, 'a') #Open existing file
            except IOError:
                #This could happen if someone was reading the file.
                #In that case, wait a while and try again
                msg = 'Warning (store_timestep): File %s could not be opened'\
                      % self.filename
                msg += ' - trying again'
                print msg
                retries += 1
                sleep(1)
            else:
                file_open = True

        if not file_open:
            msg = 'File %s could not be opened for append' % self.filename
            raise DataFileNotOpenError, msg


        domain = self.domain

        # Get the variables
        time = fid.variables['time']
        q = fid.variables[name]
        i = len(time)

        #Store time
        time[i] = self.domain.time

        # Get quantity
        Q = domain.quantities[name]
        A,V = Q.get_vertex_values(xy=False,
                                  precision = self.precision)

        q[i,:] = A.astype(self.precision)

        #Flush and close
        fid.sync()
        fid.close()


#### NED is National Exposure Database (name changed to NEXIS)

LAT_TITLE = 'LATITUDE'
LONG_TITLE = 'LONGITUDE'
X_TITLE = 'x'
Y_TITLE = 'y'
class Exposure_csv:
    def __init__(self, file_name, latitude_title=LAT_TITLE,
                 longitude_title=LONG_TITLE, is_x_y_locations=None,
                 x_title=X_TITLE, y_title=Y_TITLE,
                 refine_polygon=None, title_check_list=None):
        """
        This class is for handling the exposure csv file.
        It reads the file in and converts the lats and longs to a geospatial
        data object.
        Use the methods to read and write columns.

        The format of the csv files it reads is:
           The first row is a title row.
           Commas are the delimiters.
           Each column is a 'set' of data.

        Feel free to use/expand it to read other csv files.


        It is not for adding and deleting rows.

        Can geospatial handle string attributes? It's not made for them.
        Currently it can't load and save string attributes.

        So just use geospatial to hold the x, y and georef? Bad, since
        different attributes are in different structures.  Not so bad, the
        info to write if the .csv file is saved is in attribute_dic.

        The location info is in the geospatial attribute.


        """
        self._file_name = file_name
        self._geospatial = None #

        # self._attribute_dic is a dictionary.
        #The keys are the column titles.
        #The values are lists of column data

        # self._title_index_dic is a dictionary.
        #The keys are the column titles.
        #The values are the index positions of file columns.
        self._attribute_dic, self._title_index_dic = \
            self._load_exposure_csv(self._file_name,
                                    title_check_list=title_check_list)
        try:
            #Have code here that handles caps or lower
            lats = self._attribute_dic[latitude_title]
            longs = self._attribute_dic[longitude_title]

        except KeyError:
            # maybe a warning..
            #Let's see if this works..
            if False != is_x_y_locations:
                is_x_y_locations = True
            pass
        else:
            self._geospatial = Geospatial_data(latitudes = lats,
                                               longitudes = longs)

        if is_x_y_locations is True:
            if self._geospatial is not None:
                pass #fixme throw an error
            try:
                xs = self._attribute_dic[x_title]
                ys = self._attribute_dic[y_title]
                points = [[float(i),float(j)] for i,j in map(None,xs,ys)]
            except KeyError:
                # maybe a warning..
                msg = "Could not find location information."
                raise TitleValueError, msg
            else:
                self._geospatial = Geospatial_data(data_points=points)

        # create a list of points that are in the refining_polygon
        # described by a list of indexes representing the points

    def __cmp__(self, other):
        #print "self._attribute_dic",self._attribute_dic
        #print "other._attribute_dic",other._attribute_dic
        #print "self._title_index_dic", self._title_index_dic
        #print "other._title_index_dic", other._title_index_dic

        #Check that other is an instance of this class
        if isinstance(self, type(other)):
            result = cmp(self._attribute_dic, other._attribute_dic)
            if result != 0:
                return result
            # The order of the columns is important. Therefore..
            result = cmp(self._title_index_dic, other._title_index_dic)
            if result != 0:
                return result
            for self_ls, other_ls in map(None, self._attribute_dic,
                                         other._attribute_dic):
                result = cmp(self._attribute_dic[self_ls],
                             other._attribute_dic[other_ls])
                if result != 0:
                    return result
            return 0
        else:
            return 1

    def _load_exposure_csv(self, file_name, title_check_list=None):
        """
        Load in the csv as a dic, title as key and column info as value.
        Also create a dic, title as key and column index as value,
        to keep track of the column order.
        """
        #
        attribute_dic = {}
        title_index_dic = {}
        titles_stripped = [] # list of titles
        reader = csv.reader(file(file_name))

        # Read in and manipulate the title info
        titles = reader.next()
        for i, title in enumerate(titles):
            titles_stripped.append(title.strip())
            title_index_dic[title.strip()] = i
        title_count = len(titles_stripped)
        #print "title_index_dic",title_index_dic
        if title_check_list is not None:
            for title_check in title_check_list:
                #msg = "Reading error. This column is not present ", title_check
                #assert title_index_dic.has_key(title_check), msg
                if not title_index_dic.has_key(title_check):
                    #reader.close()
                    msg = "Reading error. This column is not present ", \
                          title_check
                    raise IOError, msg



        #Create a dic of column values, indexed by column title
        for line in reader:
            if len(line) != title_count:
                raise IOError #FIXME make this nicer
            for i, value in enumerate(line):
                attribute_dic.setdefault(titles_stripped[i],[]).append(value)

        return attribute_dic, title_index_dic

    def get_column(self, column_name, use_refind_polygon=False):
        """
        Given a column name, return a list of the column values.

        Note, the type of the values will be String!
        Do this to change a list of strings to a list of floats:
            time = [float(x) for x in time]

        Not implemented:
        if use_refind_polygon is True, only return values in the
        refined polygon
        """
        if not self._attribute_dic.has_key(column_name):
            msg = 'There is no column called %s!' % column_name
            raise TitleValueError, msg
        return self._attribute_dic[column_name]


    def get_value(self, value_column_name,
                  known_column_name,
                  known_values,
                  use_refind_polygon=False):
        """
        Do linear interpolation on the known_column, using the known_values,
        to return a value of the value_column_name.
        """
        pass


    def get_location(self, use_refind_polygon=False):
        """
        Return a geospatial object which describes the
        locations of the location file.

        Note, if there is no location info, this returns None.

        Not implemented:
        if use_refind_polygon is True, only return values in the
        refined polygon
        """
        return self._geospatial

    def set_column(self, column_name, column_values, overwrite=False):
        """
        Add a column to the 'end' (with the rightmost column being the end)
        of the csv file.

        Set overwrite to True if you want to overwrite a column.

        Note, in column_name white space is removed and case is not checked.
        Precondition:
        The column_name and column_values cannot have commas in them.
        """

        value_row_count = \
            len(self._attribute_dic[self._title_index_dic.keys()[0]])
        if len(column_values) != value_row_count:
            msg = 'The number of column values must equal the number of rows.'
            raise DataMissingValuesError, msg

        if self._attribute_dic.has_key(column_name):
            if not overwrite:
                msg = 'Column name %s already in use!' % column_name
                raise TitleValueError, msg
        else:
            # New title.  Add it to the title index.
            self._title_index_dic[column_name] = len(self._title_index_dic)
        self._attribute_dic[column_name] = column_values
        #print "self._title_index_dic[column_name]",self._title_index_dic

    def save(self, file_name=None):
        """
        Save the exposure csv file
        """
        if file_name is None:
            file_name = self._file_name

        fd = open(file_name, 'wb')
        writer = csv.writer(fd)

        #Write the title to a csv file
        line = [None] * len(self._title_index_dic)
        for title in self._title_index_dic.iterkeys():
            line[self._title_index_dic[title]] = title
        writer.writerow(line)

        # Write the values to a csv file
        value_row_count = \
            len(self._attribute_dic[self._title_index_dic.keys()[0]])
        for row_i in range(value_row_count):
            line = [None] * len(self._title_index_dic)
            for title in self._title_index_dic.iterkeys():
                line[self._title_index_dic[title]] = \
                     self._attribute_dic[title][row_i]
            writer.writerow(line)


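# Illustrative use of Exposure_csv (file and column names are hypothetical):
#
#     exposure = Exposure_csv('houses.csv')
#     values = exposure.get_column('MAX DEPTH')        # values are strings
#     values = [2*float(x) for x in values]
#     exposure.set_column('MAX DEPTH DOUBLED', values)
#     exposure.save('houses_modified.csv')
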
#Auxiliary
def write_obj(filename, x, y, z):
    """Store x,y,z vectors into filename (obj format)
       Vectors are assumed to have dimension (M,3) where
       M corresponds to the number of elements.
       Triangles are assumed to be disconnected.

       The three numbers in each vector correspond to three vertices,

       e.g. the x coordinate of vertex 1 of element i is in x[i,1]

    """
    #print 'Writing obj to %s' % filename

    import os.path

    root, ext = os.path.splitext(filename)
    if ext == '.obj':
        FN = filename
    else:
        FN = filename + '.obj'


    outfile = open(FN, 'wb')
    outfile.write("# Triangulation as an obj file\n")

    M, N = x.shape
    assert N == 3  #Assuming three vertices per element

    for i in range(M):
        for j in range(N):
            outfile.write("v %f %f %f\n" % (x[i,j], y[i,j], z[i,j]))

    for i in range(M):
        base = i*N
        outfile.write("f %d %d %d\n" % (base+1, base+2, base+3))

    outfile.close()


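# For example (illustrative):
#     from Numeric import array
#     x = array([[0.0, 1.0, 0.0]])
#     y = array([[0.0, 0.0, 1.0]])
#     z = array([[0.0, 0.0, 0.0]])
#     write_obj('single_triangle', x, y, z)   # writes single_triangle.obj
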
#########################################################
#Conversion routines
########################################################

def sww2obj(basefilename, size):
    """Convert netcdf based data output to obj
    """
    from Scientific.IO.NetCDF import NetCDFFile

    from Numeric import Float, zeros

    #Get NetCDF
    FN = create_filename('.', basefilename, 'sww', size)
    print 'Reading from ', FN
    fid = NetCDFFile(FN, 'r')  #Open existing file for read


    # Get the variables
    x = fid.variables['x']
    y = fid.variables['y']
    z = fid.variables['elevation']
    time = fid.variables['time']
    stage = fid.variables['stage']

    M = size  #Number of lines
    xx = zeros((M,3), Float)
    yy = zeros((M,3), Float)
    zz = zeros((M,3), Float)

    for i in range(M):
        for j in range(3):
            xx[i,j] = x[i+j*M]
            yy[i,j] = y[i+j*M]
            zz[i,j] = z[i+j*M]

    #Write obj for bathymetry
    FN = create_filename('.', basefilename, 'obj', size)
    write_obj(FN, xx, yy, zz)


    #Now read all the data with variable information, combine with
    #x,y info and store as obj

    for k in range(len(time)):
        t = time[k]
        print 'Processing timestep %f' % t

        for i in range(M):
            for j in range(3):
                zz[i,j] = stage[k,i+j*M]


        #Write obj for variable data
        #FN = create_filename(basefilename, 'obj', size, time=t)
        FN = create_filename('.', basefilename[:5], 'obj', size, time=t)
        write_obj(FN, xx, yy, zz)


def dat2obj(basefilename):
    """Convert line based data output to obj
    FIXME: Obsolete?
    """

    import glob, os
    from anuga.config import data_dir


    #Get bathymetry and x,y's
    lines = open(data_dir+os.sep+basefilename+'_geometry.dat', 'r').readlines()

    from Numeric import zeros, Float

    M = len(lines)  #Number of lines
    x = zeros((M,3), Float)
    y = zeros((M,3), Float)
    z = zeros((M,3), Float)

    for i, line in enumerate(lines):
        tokens = line.split()
        values = map(float, tokens)

        for j in range(3):
            x[i,j] = values[j*3]
            y[i,j] = values[j*3+1]
            z[i,j] = values[j*3+2]


    #Write obj for bathymetry
    write_obj(data_dir+os.sep+basefilename+'_geometry', x, y, z)


    #Now read all the data files with variable information, combine with
    #x,y info and store as obj

    files = glob.glob(data_dir+os.sep+basefilename+'*.dat')

    for filename in files:
        print 'Processing %s' % filename

        # glob already returns paths including data_dir
        lines = open(filename, 'r').readlines()
        assert len(lines) == M
        root, ext = os.path.splitext(filename)

        #Get time from filename
        i0 = filename.find('_time=')
        if i0 == -1:
            #Skip bathymetry file
            continue

        i0 += 6  #Position where time starts
        i1 = filename.find('.dat')

        if i1 > i0:
            t = float(filename[i0:i1])
        else:
            raise DataTimeError, 'Hmmmm'



        for i, line in enumerate(lines):
            tokens = line.split()
            values = map(float, tokens)

            for j in range(3):
                z[i,j] = values[j]

        #Write obj for variable data
        write_obj(data_dir+os.sep+basefilename+'_time=%.4f' % t, x, y, z)


def filter_netcdf(filename1, filename2, first=0, last=None, step=1):
    """Read netcdf filename1, pick timesteps first:step:last and save to
    netcdf file filename2
    """
    from Scientific.IO.NetCDF import NetCDFFile

    #Get NetCDF
    infile = NetCDFFile(filename1, 'r')  #Open existing file for read
    outfile = NetCDFFile(filename2, 'w')  #Open new file


    #Copy dimensions
    for d in infile.dimensions:
        outfile.createDimension(d, infile.dimensions[d])

    for name in infile.variables:
        var = infile.variables[name]
        outfile.createVariable(name, var.typecode(), var.dimensions)


    #Copy the static variables
    for name in infile.variables:
        if name == 'time' or name == 'stage':
            pass
        else:
            #Copy
            outfile.variables[name][:] = infile.variables[name][:]

    #Copy selected timesteps
    time = infile.variables['time']
    stage = infile.variables['stage']

    newtime = outfile.variables['time']
    newstage = outfile.variables['stage']

    if last is None:
        last = len(time)

    selection = range(first, last, step)
    for i, j in enumerate(selection):
        print 'Copying timestep %d of %d (%f)' % (j, last-first, time[j])
        newtime[i] = time[j]
        newstage[i,:] = stage[j,:]

    #Close
    infile.close()
    outfile.close()
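
# For example (illustrative):
#     filter_netcdf('big.sww', 'coarse.sww', first=0, step=5)
# keeps every fifth stored timestep of big.sww in coarse.sww.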


#Get data objects
def get_dataobject(domain, mode='w'):
    """Return instance of class of given format using filename
    """

    cls = eval('Data_format_%s' % domain.format)
    return cls(domain, mode)




def dem2pts(basename_in, basename_out=None,
            easting_min=None, easting_max=None,
            northing_min=None, northing_max=None,
            use_cache=False, verbose=False,):
    """Read Digital Elevation model from the following NetCDF format (.dem)

    Example:

    ncols         3121
    nrows         1800
    xllcorner     722000
    yllcorner     5893000
    cellsize      25
    NODATA_value  -9999
    138.3698 137.4194 136.5062 135.5558 ..........

    Convert to NetCDF pts format which is

    points:  (Nx2) Float array
    elevation: N Float array
    """



    kwargs = {'basename_out': basename_out,
              'easting_min': easting_min,
              'easting_max': easting_max,
              'northing_min': northing_min,
              'northing_max': northing_max,
              'verbose': verbose}

    if use_cache is True:
        from caching import cache
        result = cache(_dem2pts, basename_in, kwargs,
                       dependencies = [basename_in + '.dem'],
                       verbose = verbose)

    else:
        result = apply(_dem2pts, [basename_in], kwargs)

    return result


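# For example (illustrative values):
#     dem2pts('perth_topo', easting_min=380000, easting_max=400000,
#             northing_min=6430000, northing_max=6450000)
# reads perth_topo.dem and writes perth_topo.pts clipped to the given extent.
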
def _dem2pts(basename_in, basename_out=None, verbose=False,
            easting_min=None, easting_max=None,
            northing_min=None, northing_max=None):
    """Read Digital Elevation model from the following NetCDF format (.dem)

    Internal function. See public function dem2pts for details.
    """

    #FIXME: Can this be written feasibly using write_pts?

    import os
    from Scientific.IO.NetCDF import NetCDFFile
    from Numeric import Float, zeros, reshape, sum

    root = basename_in

    #Get NetCDF
    infile = NetCDFFile(root + '.dem', 'r')  #Open existing netcdf file for read

    if verbose: print 'Reading DEM from %s' % (root + '.dem')

    ncols = infile.ncols[0]
    nrows = infile.nrows[0]
    xllcorner = infile.xllcorner[0]  #Easting of lower left corner
    yllcorner = infile.yllcorner[0]  #Northing of lower left corner
    cellsize = infile.cellsize[0]
    NODATA_value = infile.NODATA_value[0]
    dem_elevation = infile.variables['elevation']

    zone = infile.zone[0]
    false_easting = infile.false_easting[0]
    false_northing = infile.false_northing[0]

    #Text strings
    projection = infile.projection
    datum = infile.datum
    units = infile.units


    #Get output file
    if basename_out is None:
        ptsname = root + '.pts'
    else:
        ptsname = basename_out + '.pts'

    if verbose: print 'Store to NetCDF file %s' % ptsname
    # NetCDF file definition
    outfile = NetCDFFile(ptsname, 'w')

    #Create new file
    outfile.institution = 'Geoscience Australia'
    outfile.description = 'NetCDF pts format for compact and portable storage ' +\
                          'of spatial point data'
    #assign default values
    if easting_min is None: easting_min = xllcorner
    if easting_max is None: easting_max = xllcorner + ncols*cellsize
    if northing_min is None: northing_min = yllcorner
    if northing_max is None: northing_max = yllcorner + nrows*cellsize

    #compute offsets to update georeferencing
    easting_offset = xllcorner - easting_min
    northing_offset = yllcorner - northing_min

    #Georeferencing
    outfile.zone = zone
    outfile.xllcorner = easting_min #Easting of lower left corner
    outfile.yllcorner = northing_min #Northing of lower left corner
    outfile.false_easting = false_easting
    outfile.false_northing = false_northing

    outfile.projection = projection
    outfile.datum = datum
    outfile.units = units


    #Grid info (FIXME: probably not going to be used, but heck)
    outfile.ncols = ncols
    outfile.nrows = nrows

    dem_elevation_r = reshape(dem_elevation, (nrows, ncols))
    totalnopoints = nrows*ncols

    # calculating number of NODATA_values for each row in clipped region
    #FIXME: use array operations to do faster
    nn = 0
    k = 0
    i1_0 = 0
    j1_0 = 0
    thisj = 0
    thisi = 0
    for i in range(nrows):
        y = (nrows-i-1)*cellsize + yllcorner
        for j in range(ncols):
            x = j*cellsize + xllcorner
            if easting_min <= x <= easting_max and \
               northing_min <= y <= northing_max:
                thisj = j
                thisi = i
                if dem_elevation_r[i,j] == NODATA_value: nn += 1

                if k == 0:
                    i1_0 = i
                    j1_0 = j
                k += 1

    index1 = j1_0
    index2 = thisj

    # dimension definitions
    nrows_in_bounding_box = int(round((northing_max-northing_min)/cellsize))
    ncols_in_bounding_box = int(round((easting_max-easting_min)/cellsize))

    clippednopoints = (thisi+1-i1_0)*(thisj+1-j1_0)
    nopoints = clippednopoints-nn

    clipped_dem_elev = dem_elevation_r[i1_0:thisi+1,j1_0:thisj+1]

    if verbose:
        print 'There are %d values in the elevation' % totalnopoints
        print 'There are %d values in the clipped elevation' % clippednopoints
        print 'There are %d NODATA_values in the clipped elevation' % nn

    outfile.createDimension('number_of_points', nopoints)
    outfile.createDimension('number_of_dimensions', 2) #This is 2d data

    # variable definitions
    outfile.createVariable('points', Float, ('number_of_points',
                                             'number_of_dimensions'))
    outfile.createVariable('elevation', Float, ('number_of_points',))

    # Get handles to the variables
    points = outfile.variables['points']
    elevation = outfile.variables['elevation']

    lenv = index2-index1+1
    #Store data
    global_index = 0
    #for i in range(nrows):
    for i in range(i1_0, thisi+1, 1):
        if verbose and i % ((nrows+10)/10) == 0:
            print 'Processing row %d of %d' % (i, nrows)

        lower_index = global_index

        v = dem_elevation_r[i,index1:index2+1]
        no_NODATA = sum(v == NODATA_value)
        if no_NODATA > 0:
            newcols = lenv - no_NODATA #ncols_in_bounding_box - no_NODATA
        else:
            newcols = lenv #ncols_in_bounding_box

        telev = zeros(newcols, Float)
        tpoints = zeros((newcols, 2), Float)

        local_index = 0

        y = (nrows-i-1)*cellsize + yllcorner
        #for j in range(ncols):
        for j in range(j1_0, index2+1, 1):

            x = j*cellsize + xllcorner
            if easting_min <= x <= easting_max and \
               northing_min <= y <= northing_max and \
               dem_elevation_r[i,j] != NODATA_value:
                tpoints[local_index, :] = [x-easting_min, y-northing_min]
                telev[local_index] = dem_elevation_r[i, j]
                global_index += 1
                local_index += 1

        upper_index = global_index

        if upper_index == lower_index + newcols:
            points[lower_index:upper_index, :] = tpoints
            elevation[lower_index:upper_index] = telev

    assert global_index == nopoints, 'index not equal to number of points'

    infile.close()
    outfile.close()



def _read_hecras_cross_sections(lines):
    """Return block of surface lines for each cross section.
    Each block starts with SURFACE LINE and
    ends with END CROSS-SECTION.
    """

    points = []

    reading_surface = False
    for i, line in enumerate(lines):

        if len(line.strip()) == 0:    #Ignore blanks
            continue

        if lines[i].strip().startswith('SURFACE LINE'):
            reading_surface = True
            continue

        if lines[i].strip().startswith('END') and reading_surface:
            yield points
            reading_surface = False
            points = []

        if reading_surface:
            fields = line.strip().split(',')
            easting = float(fields[0])
            northing = float(fields[1])
            elevation = float(fields[2])
            points.append([easting, northing, elevation])




1501def hecras_cross_sections2pts(basename_in,
1502                              basename_out=None,
1503                              verbose=False):
1504    """Read HEC-RAS Elevation datal from the following ASCII format (.sdf)
1505
1506    Example:
1507
1508
1509# RAS export file created on Mon 15Aug2005 11:42
1510# by HEC-RAS Version 3.1.1
1511
1512BEGIN HEADER:
1513  UNITS: METRIC
1514  DTM TYPE: TIN
1515  DTM: v:\1\cit\perth_topo\river_tin
1516  STREAM LAYER: c:\local\hecras\21_02_03\up_canning_cent3d.shp
1517  CROSS-SECTION LAYER: c:\local\hecras\21_02_03\up_can_xs3d.shp
1518  MAP PROJECTION: UTM
1519  PROJECTION ZONE: 50
1520  DATUM: AGD66
1521  VERTICAL DATUM:
1522  NUMBER OF REACHES:  19
1523  NUMBER OF CROSS-SECTIONS:  14206
1524END HEADER:
1525
1526
1527Only the SURFACE LINE data of the following form will be utilised
1528
1529  CROSS-SECTION:
1530    STREAM ID:Southern-Wungong
1531    REACH ID:Southern-Wungong
1532    STATION:19040.*
1533    CUT LINE:
1534      405548.671603161 , 6438142.7594925
1535      405734.536092045 , 6438326.10404912
1536      405745.130459356 , 6438331.48627354
1537      405813.89633823 , 6438368.6272789
1538    SURFACE LINE:
1539     405548.67,   6438142.76,   35.37
1540     405552.24,   6438146.28,   35.41
1541     405554.78,   6438148.78,   35.44
1542     405555.80,   6438149.79,   35.44
1543     405559.37,   6438153.31,   35.45
1544     405560.88,   6438154.81,   35.44
1545     405562.93,   6438156.83,   35.42
1546     405566.50,   6438160.35,   35.38
1547     405566.99,   6438160.83,   35.37
1548     ...
1549   END CROSS-SECTION
1550
1551    Convert to NetCDF pts format which is
1552
1553    points:  (Nx2) Float array
1554    elevation: N Float array
1555    """
1556
1557    import os
1558    from Scientific.IO.NetCDF import NetCDFFile
1559    from Numeric import Float, zeros, reshape
1560
1561    root = basename_in
1562
1563    #Get ASCII file
1564    infile = open(root + '.sdf', 'r')  #Open SDF file for read
1565
1566    if verbose: print 'Reading DEM from %s' %(root + '.sdf')
1567
1568    lines = infile.readlines()
1569    infile.close()
1570
1571    if verbose: print 'Converting to pts format'
1572
1573    i = 0
1574    while lines[i].strip() == '' or lines[i].strip().startswith('#'):
1575        i += 1
1576
1577    assert lines[i].strip().upper() == 'BEGIN HEADER:'
1578    i += 1
1579
1580    assert lines[i].strip().upper().startswith('UNITS:')
1581    units = lines[i].strip().split()[1]
1582    i += 1
1583
1584    assert lines[i].strip().upper().startswith('DTM TYPE:')
1585    i += 1
1586
1587    assert lines[i].strip().upper().startswith('DTM:')
1588    i += 1
1589
1590    assert lines[i].strip().upper().startswith('STREAM')
1591    i += 1
1592
1593    assert lines[i].strip().upper().startswith('CROSS')
1594    i += 1
1595
1596    assert lines[i].strip().upper().startswith('MAP PROJECTION:')
1597    projection = lines[i].strip().split(':')[1]
1598    i += 1
1599
1600    assert lines[i].strip().upper().startswith('PROJECTION ZONE:')
1601    zone = int(lines[i].strip().split(':')[1])
1602    i += 1
1603
1604    assert lines[i].strip().upper().startswith('DATUM:')
1605    datum = lines[i].strip().split(':')[1]
1606    i += 1
1607
1608    assert lines[i].strip().upper().startswith('VERTICAL DATUM:')
1609    i += 1
1610
1611    assert lines[i].strip().upper().startswith('NUMBER OF REACHES:')
1612    i += 1
1613
1614    assert lines[i].strip().upper().startswith('NUMBER OF CROSS-SECTIONS:')
1615    number_of_cross_sections = int(lines[i].strip().split(':')[1])
1616    i += 1
1617
1618
1619    #Now read all points
1620    points = []
1621    elevation = []
    for j, entries in enumerate(_read_hecras_cross_sections(lines[i:])):
        for entry in entries:
            points.append(entry[:2])
            elevation.append(entry[2])


    msg = 'Actual number of cross sections == %d, reported as %d'\
          %(j+1, number_of_cross_sections)
    assert j+1 == number_of_cross_sections, msg
1631
1632    #Get output file
    if basename_out is None:
1634        ptsname = root + '.pts'
1635    else:
1636        ptsname = basename_out + '.pts'
1637
1638    geo_ref = Geo_reference(zone, 0, 0, datum, projection, units)
1639    geo = Geospatial_data(points, {"elevation":elevation},
1640                          verbose=verbose, geo_reference=geo_ref)
1641    geo.export_points_file(ptsname)
1642
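# A minimal, hedged sketch of reading back the .pts file written by the
# converter above. It assumes a converted file 'river_tin.pts' exists
# (name illustrative only); Geospatial_data is imported at the top of
# this module.
def _example_read_converted_pts():
    G = Geospatial_data(file_name='river_tin.pts')
    points = G.get_data_points(absolute=True)   # Nx2 array of eastings/northings
    elevation = G.get_attributes('elevation')   # N elevation values
    return points, elevation
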
1643def export_grid(basename_in, extra_name_out = None,
1644                quantities = None, # defaults to elevation
1645                timestep = None,
1646                reduction = None,
1647                cellsize = 10,
1648                NODATA_value = -9999,
1649                easting_min = None,
1650                easting_max = None,
1651                northing_min = None,
1652                northing_max = None,
1653                verbose = False,
1654                origin = None,
1655                datum = 'WGS84',
1656                format = 'ers'):
1657    """
1658   
1659    Wrapper for sww2dem. - see sww2dem to find out what most of the
1660    parameters do.
1661
1662    Quantities is a list of quantities.  Each quantity will be
1663    calculated for each sww file.
1664
1665    This returns the basenames of the files returned, which is made up
1666    of the dir and all of the file name, except the extension.
1667
1668    This function returns the names of the files produced.
1669
1670    It will also produce as many output files as there are input sww files.
1671    """
1672   
1673    if quantities is None:
1674        quantities = ['elevation']
1675       
    if type(quantities) is str:
        quantities = [quantities]
1678
1679    # How many sww files are there?
    dir, base = os.path.split(basename_in)

    iterate_over = get_all_swwfiles(dir, base, verbose)

    if dir == "":
        dir = "."  # Unix compatibility

    files_out = []
1691    for sww_file in iterate_over:
1692        for quantity in quantities:
1693            if extra_name_out is None:
1694                basename_out = sww_file + '_' + quantity
1695            else:
1696                basename_out = sww_file + '_' + quantity + '_' \
1697                               + extra_name_out
1699       
1700            file_out = sww2dem(dir+sep+sww_file, dir+sep+basename_out,
1701                               quantity, 
1702                               timestep,
1703                               reduction,
1704                               cellsize,
1705                               NODATA_value,
1706                               easting_min,
1707                               easting_max,
1708                               northing_min,
1709                               northing_max,
1710                               verbose,
1711                               origin,
1712                               datum,
1713                               format)
1714            files_out.append(file_out)
1715    #print "basenames_out after",basenames_out
1716    return files_out
1717
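# Hedged usage sketch for export_grid: rasterise the maximum stage and the
# elevation from every sww file starting with 'bedslope' in the current
# directory (file names illustrative only).
def _example_export_grid():
    return export_grid('bedslope',
                       quantities=['stage', 'elevation'],
                       reduction=max,
                       cellsize=25,
                       format='asc',
                       verbose=False)
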
1718
1719def get_timeseries(production_dirs, output_dir, scenario_name, gauges_dir_name,
1720                   plot_quantity, generate_fig = False,
1721                   reportname = None, surface = False, time_min = None,
1722                   time_max = None, title_on = False, verbose = True,
1723                   nodes=None):
1724    """
1725    nodes - number of processes used.
1726
1727    warning - this function has no tests
1728    """
    if reportname is None:
        report = False
    else:
        report = True

    if nodes is None:
        is_parallel = False
    else:
        is_parallel = True
1738       
1739    # Generate figures
1740    swwfiles = {}
1741   
    if is_parallel is True:
        for i in range(nodes):
            print 'Sending node %d of %d' %(i, nodes)
            swwfiles = {}
            # Build a per-node report name (fixes former NameError on
            # undefined 'report_name' and avoids accumulating suffixes)
            if reportname is not None:
                node_reportname = reportname + '_%s' %(i)
            else:
                node_reportname = None
            for label_id in production_dirs.keys():
                if label_id == 'boundaries':
                    swwfile = best_boundary_sww
                else:
                    file_loc = output_dir + label_id + sep
                    sww_extra = '_P%s_%s' %(i, nodes)
                    swwfile = file_loc + scenario_name + sww_extra + '.sww'
                    print 'swwfile', swwfile
                swwfiles[swwfile] = label_id

            # Call once per node, after all sww files have been collected
            texname, elev_output = sww2timeseries(swwfiles,
                                                  gauges_dir_name,
                                                  production_dirs,
                                                  report = report,
                                                  reportname = node_reportname,
                                                  plot_quantity = plot_quantity,
                                                  generate_fig = generate_fig,
                                                  surface = surface,
                                                  time_min = time_min,
                                                  time_max = time_max,
                                                  title_on = title_on,
                                                  verbose = verbose)
1770    else:   
1771        for label_id in production_dirs.keys():       
1772            if label_id == 'boundaries':
1773                print 'boundaries'
1774                file_loc = project.boundaries_in_dir
1775                swwfile = project.boundaries_dir_name3 + '.sww'
1776                #  swwfile = boundary_dir_filename
1777            else:
1778                file_loc = output_dir + label_id + sep
1779                swwfile = file_loc + scenario_name + '.sww'
1780            swwfiles[swwfile] = label_id
1781       
1782        texname, elev_output = sww2timeseries(swwfiles,
1783                                              gauges_dir_name,
1784                                              production_dirs,
1785                                              report = report,
1786                                              reportname = reportname,
1787                                              plot_quantity = plot_quantity,
1788                                              generate_fig = generate_fig,
1789                                              surface = surface,
1790                                              time_min = time_min,
1791                                              time_max = time_max,
1792                                              title_on = title_on,
1793                                              verbose = verbose)
1794                                         
1795
1796   
1797def sww2dem(basename_in, basename_out = None,
1798            quantity = None, # defaults to elevation
1799            timestep = None,
1800            reduction = None,
1801            cellsize = 10,
1802            NODATA_value = -9999,
1803            easting_min = None,
1804            easting_max = None,
1805            northing_min = None,
1806            northing_max = None,
1807            verbose = False,
1808            origin = None,
1809            datum = 'WGS84',
1810            format = 'ers'):
1811
1812    """Read SWW file and convert to Digitial Elevation model format (.asc or .ers)
1813
1814    Example (ASC):
1815
1816    ncols         3121
1817    nrows         1800
1818    xllcorner     722000
1819    yllcorner     5893000
1820    cellsize      25
1821    NODATA_value  -9999
1822    138.3698 137.4194 136.5062 135.5558 ..........
1823
1824    Also write accompanying file with same basename_in but extension .prj
1825    used to fix the UTM zone, datum, false northings and eastings.
1826
1827    The prj format is assumed to be as
1828
1829    Projection    UTM
1830    Zone          56
1831    Datum         WGS84
1832    Zunits        NO
1833    Units         METERS
1834    Spheroid      WGS84
1835    Xshift        0.0000000000
1836    Yshift        10000000.0000000000
1837    Parameters
1838
1839
1840    The parameter quantity must be the name of an existing quantity or
1841    an expression involving existing quantities. The default is
1842    'elevation'. Quantity is not a list of quantities.
1843
    If timestep (an index) is given, output quantity at that timestep.

    If reduction is given, use that to reduce quantity over all timesteps.

    datum sets the geodetic datum written to the output header (default 'WGS84').

    format can be either 'asc' or 'ers'.
1851    """
1852
1853    import sys
1854    from Numeric import array, Float, concatenate, NewAxis, zeros, reshape, sometrue
1855    from Numeric import array2string
1856
1857    from anuga.utilities.polygon import inside_polygon, outside_polygon, separate_points_by_polygon
1858    from anuga.abstract_2d_finite_volumes.util import apply_expression_to_dictionary
1859
1860    msg = 'Format must be either asc or ers'
1861    assert format.lower() in ['asc', 'ers'], msg
1862
1863
1864    false_easting = 500000
1865    false_northing = 10000000
1866
1867    if quantity is None:
1868        quantity = 'elevation'
1869       
1870    if reduction is None:
1871        reduction = max
1872
1873    if basename_out is None:
1874        basename_out = basename_in + '_%s' %quantity
1875
1876    if quantity_formula.has_key(quantity):
1877        quantity = quantity_formula[quantity]
1878       
1879    swwfile = basename_in + '.sww'
1880    demfile = basename_out + '.' + format
    # Note the use of a .ers extension is optional (write_ermapper_grid will
    # deal with either option)
1883   
1885   
1886    # Read sww file
    if verbose:
        print 'Reading from %s' %swwfile
        print 'Output basename is %s' %basename_out
1890   
1891    from Scientific.IO.NetCDF import NetCDFFile
1892    fid = NetCDFFile(swwfile)
1893
1894    #Get extent and reference
1895    x = fid.variables['x'][:]
1896    y = fid.variables['y'][:]
1897    volumes = fid.variables['volumes'][:]
1898    if timestep is not None:
1899        times = fid.variables['time'][timestep]
1900    else:
1901        times = fid.variables['time'][:]
1902
1903    number_of_timesteps = fid.dimensions['number_of_timesteps']
1904    number_of_points = fid.dimensions['number_of_points']
1905   
1906    if origin is None:
1907
1908        # Get geo_reference
1909        # sww files don't have to have a geo_ref
1910        try:
1911            geo_reference = Geo_reference(NetCDFObject=fid)
1912        except AttributeError, e:
1913            geo_reference = Geo_reference() # Default georef object
1914
1915        xllcorner = geo_reference.get_xllcorner()
1916        yllcorner = geo_reference.get_yllcorner()
1917        zone = geo_reference.get_zone()
1918    else:
1919        zone = origin[0]
1920        xllcorner = origin[1]
1921        yllcorner = origin[2]
1922
1923
1924
1925    # FIXME: Refactor using code from Interpolation_function.statistics (in interpolate.py)
1926    # Something like print swwstats(swwname)
1927    if verbose:
1928        print '------------------------------------------------'
1929        print 'Statistics of SWW file:'
1930        print '  Name: %s' %swwfile
1931        print '  Reference:'
1932        print '    Lower left corner: [%f, %f]'\
1933              %(xllcorner, yllcorner)
1934        if timestep is not None:
1935            print '    Time: %f' %(times)
1936        else:
1937            print '    Start time: %f' %fid.starttime[0]
1938        print '  Extent:'
1939        print '    x [m] in [%f, %f], len(x) == %d'\
1940              %(min(x.flat), max(x.flat), len(x.flat))
1941        print '    y [m] in [%f, %f], len(y) == %d'\
1942              %(min(y.flat), max(y.flat), len(y.flat))
1943        if timestep is not None:
1944            print '    t [s] = %f, len(t) == %d' %(times, 1)
1945        else:
1946            print '    t [s] in [%f, %f], len(t) == %d'\
1947              %(min(times), max(times), len(times))
1948        print '  Quantities [SI units]:'
        # Already inside the "if verbose:" block, so print unconditionally
        for name in ['stage', 'xmomentum', 'ymomentum']:
            q = fid.variables[name][:].flat
            if timestep is not None:
                q = q[timestep*len(x):(timestep+1)*len(x)]
            print '    %s in [%f, %f]' %(name, min(q), max(q))
        for name in ['elevation']:
            q = fid.variables[name][:].flat
            print '    %s in [%f, %f]' %(name, min(q), max(q))
1957           
1958    # Get quantity and reduce if applicable
1959    if verbose: print 'Processing quantity %s' %quantity
1960
1961    # Turn NetCDF objects into Numeric arrays
1962    quantity_dict = {}
1963    for name in fid.variables.keys():
1964        quantity_dict[name] = fid.variables[name][:] 
1965
1966
1967    # Convert quantity expression to quantities found in sww file   
1968    q = apply_expression_to_dictionary(quantity, quantity_dict)
1969
1970    if len(q.shape) == 2:
1971        #q has a time component and needs to be reduced along
1972        #the temporal dimension
1973        if verbose: print 'Reducing quantity %s' %quantity
1974        q_reduced = zeros( number_of_points, Float )
1975       
1976        if timestep is not None:
1977            for k in range(number_of_points):
1978                q_reduced[k] = q[timestep,k] 
1979        else:
1980            for k in range(number_of_points):
1981                q_reduced[k] = reduction( q[:,k] )
1982
1983        q = q_reduced
1984
1985    #Post condition: Now q has dimension: number_of_points
1986    assert len(q.shape) == 1
1987    assert q.shape[0] == number_of_points
1988
1989
1990    if verbose:
1991        print 'Processed values for %s are in [%f, %f]' %(quantity, min(q), max(q))
1992
1993
1994    #Create grid and update xll/yll corner and x,y
1995
1996    #Relative extent
1997    if easting_min is None:
1998        xmin = min(x)
1999    else:
2000        xmin = easting_min - xllcorner
2001
2002    if easting_max is None:
2003        xmax = max(x)
2004    else:
2005        xmax = easting_max - xllcorner
2006
2007    if northing_min is None:
2008        ymin = min(y)
2009    else:
2010        ymin = northing_min - yllcorner
2011
2012    if northing_max is None:
2013        ymax = max(y)
2014    else:
2015        ymax = northing_max - yllcorner
2016
2017
2018
2019    if verbose: print 'Creating grid'
2020    ncols = int((xmax-xmin)/cellsize)+1
2021    nrows = int((ymax-ymin)/cellsize)+1
2022
2023
2024    #New absolute reference and coordinates
2025    newxllcorner = xmin+xllcorner
2026    newyllcorner = ymin+yllcorner
2027
2028    x = x+xllcorner-newxllcorner
2029    y = y+yllcorner-newyllcorner
2030   
2031    vertex_points = concatenate ((x[:, NewAxis] ,y[:, NewAxis]), axis = 1)
2032    assert len(vertex_points.shape) == 2
2033
2034    grid_points = zeros ( (ncols*nrows, 2), Float )
2035
2036
    for i in xrange(nrows):
        if format.lower() == 'asc':
            yg = i*cellsize
        else:
            # This will flip the order of the y values for ers
            yg = (nrows-i)*cellsize

        for j in xrange(ncols):
            xg = j*cellsize
            k = i*ncols + j

            grid_points[k,0] = xg
            grid_points[k,1] = yg
2050
2051    #Interpolate
2052    from anuga.fit_interpolate.interpolate import Interpolate
2053
    # Remove loners from vertex_points, volumes here
    vertex_points, volumes = remove_lone_verts(vertex_points, volumes)
    interp = Interpolate(vertex_points, volumes, verbose = verbose)
2059
2060    #Interpolate using quantity values
2061    if verbose: print 'Interpolating'
2062    grid_values = interp.interpolate(q, grid_points).flat
2063
2064
2065    if verbose:
2066        print 'Interpolated values are in [%f, %f]' %(min(grid_values),
2067                                                      max(grid_values))
2068
2069    #Assign NODATA_value to all points outside bounding polygon (from interpolation mesh)
2070    P = interp.mesh.get_boundary_polygon()
2071    outside_indices = outside_polygon(grid_points, P, closed=True)
2072
2073    for i in outside_indices:
2074        grid_values[i] = NODATA_value
2075
2076
2077
2078
2079    if format.lower() == 'ers':
2080        # setup ERS header information
2081        grid_values = reshape(grid_values,(nrows, ncols))
2082        header = {}
2083        header['datum'] = '"' + datum + '"'
2084        # FIXME The use of hardwired UTM and zone number needs to be made optional
2085        # FIXME Also need an automatic test for coordinate type (i.e. EN or LL)
2086        header['projection'] = '"UTM-' + str(zone) + '"'
2087        header['coordinatetype'] = 'EN'
2088        if header['coordinatetype'] == 'LL':
2089            header['longitude'] = str(newxllcorner)
2090            header['latitude'] = str(newyllcorner)
2091        elif header['coordinatetype'] == 'EN':
2092            header['eastings'] = str(newxllcorner)
2093            header['northings'] = str(newyllcorner)
2094        header['nullcellvalue'] = str(NODATA_value)
2095        header['xdimension'] = str(cellsize)
2096        header['ydimension'] = str(cellsize)
2097        header['value'] = '"' + quantity + '"'
2098        #header['celltype'] = 'IEEE8ByteReal'  #FIXME: Breaks unit test
2099
2100
2101        #Write
2102        if verbose: print 'Writing %s' %demfile
2103        import ermapper_grids
2104        ermapper_grids.write_ermapper_grid(demfile, grid_values, header)
2105
2106        fid.close()
2107    else:
2108        #Write to Ascii format
2109
2110        #Write prj file
2111        prjfile = basename_out + '.prj'
2112
2113        if verbose: print 'Writing %s' %prjfile
2114        prjid = open(prjfile, 'w')
2115        prjid.write('Projection    %s\n' %'UTM')
2116        prjid.write('Zone          %d\n' %zone)
2117        prjid.write('Datum         %s\n' %datum)
2118        prjid.write('Zunits        NO\n')
2119        prjid.write('Units         METERS\n')
2120        prjid.write('Spheroid      %s\n' %datum)
2121        prjid.write('Xshift        %d\n' %false_easting)
2122        prjid.write('Yshift        %d\n' %false_northing)
2123        prjid.write('Parameters\n')
2124        prjid.close()
2125
2126
2127
2128        if verbose: print 'Writing %s' %demfile
2129
2130        ascid = open(demfile, 'w')
2131
2132        ascid.write('ncols         %d\n' %ncols)
2133        ascid.write('nrows         %d\n' %nrows)
2134        ascid.write('xllcorner     %d\n' %newxllcorner)
2135        ascid.write('yllcorner     %d\n' %newyllcorner)
2136        ascid.write('cellsize      %f\n' %cellsize)
2137        ascid.write('NODATA_value  %d\n' %NODATA_value)
2138
2139
2143
2144        for i in range(nrows):
2145            if verbose and i%((nrows+10)/10)==0:
2146                print 'Doing row %d of %d' %(i, nrows)
2147
2148            base_index = (nrows-i-1)*ncols
2149
            row = grid_values[base_index:base_index+ncols]
            s = array2string(row, max_line_width=sys.maxint)
            ascid.write(s[1:-1] + '\n')
2153
2154
2161
        #Close
        ascid.close()
        fid.close()

    # Return the basename for both the ers and the asc case
    return basename_out
2167
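# Hedged sketch: water depth is not stored in sww files directly, but the
# quantity argument of sww2dem accepts expressions over stored quantities,
# so maximum depth can be rasterised as stage minus elevation
# ('runup' is an illustrative basename).
def _example_sww2dem_depth():
    return sww2dem('runup',
                   basename_out='runup_depth',
                   quantity='stage-elevation',
                   reduction=max,
                   cellsize=10,
                   format='asc')
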
2168#Backwards compatibility
2169def sww2asc(basename_in, basename_out = None,
2170            quantity = None,
2171            timestep = None,
2172            reduction = None,
2173            cellsize = 10,
2174            verbose = False,
2175            origin = None):
2176    print 'sww2asc will soon be obsoleted - please use sww2dem'
2177    sww2dem(basename_in,
2178            basename_out = basename_out,
2179            quantity = quantity,
2180            timestep = timestep,
2181            reduction = reduction,
2182            cellsize = cellsize,
2183            verbose = verbose,
            origin = origin,
            datum = 'WGS84',
            format = 'asc')
2187
2188def sww2ers(basename_in, basename_out = None,
2189            quantity = None,
2190            timestep = None,
2191            reduction = None,
2192            cellsize = 10,
2193            verbose = False,
2194            origin = None,
2195            datum = 'WGS84'):
2196    print 'sww2ers will soon be obsoleted - please use sww2dem'
2197    sww2dem(basename_in,
2198            basename_out = basename_out,
2199            quantity = quantity,
2200            timestep = timestep,
2201            reduction = reduction,
2202            cellsize = cellsize,
2203            verbose = verbose,
2204            origin = origin,
2205            datum = datum,
2206            format = 'ers')
2207################################# END COMPATIBILITY ##############
2208
2209
2210
2211def sww2pts(basename_in, basename_out=None,
2212            data_points=None,
2213            quantity=None,
2214            timestep=None,
2215            reduction=None,
2216            NODATA_value=-9999,
2217            verbose=False,
2218            origin=None):
2220
2221
2222    """Read SWW file and convert to interpolated values at selected points
2223
2224    The parameter quantity' must be the name of an existing quantity or
2225    an expression involving existing quantities. The default is
2226    'elevation'.
2227
2228    if timestep (an index) is given, output quantity at that timestep
2229
2230    if reduction is given use that to reduce quantity over all timesteps.
2231
2232    data_points (Nx2 array) give locations of points where quantity is to be computed.
2233   
2234    """
2235
2236    import sys
2237    from Numeric import array, Float, concatenate, NewAxis, zeros, reshape, sometrue
2238    from Numeric import array2string
2239
2240    from anuga.utilities.polygon import inside_polygon, outside_polygon, separate_points_by_polygon
2241    from anuga.abstract_2d_finite_volumes.util import apply_expression_to_dictionary
2242
2243    from anuga.geospatial_data.geospatial_data import Geospatial_data
2244
2245    if quantity is None:
2246        quantity = 'elevation'
2247
2248    if reduction is None:
2249        reduction = max
2250
2251    if basename_out is None:
2252        basename_out = basename_in + '_%s' %quantity
2253
2254    swwfile = basename_in + '.sww'
2255    ptsfile = basename_out + '.pts'
2256
2257    # Read sww file
2258    if verbose: print 'Reading from %s' %swwfile
2259    from Scientific.IO.NetCDF import NetCDFFile
2260    fid = NetCDFFile(swwfile)
2261
2262    # Get extent and reference
2263    x = fid.variables['x'][:]
2264    y = fid.variables['y'][:]
2265    volumes = fid.variables['volumes'][:]
2266
2267    number_of_timesteps = fid.dimensions['number_of_timesteps']
2268    number_of_points = fid.dimensions['number_of_points']
2269    if origin is None:
2270
2271        # Get geo_reference
2272        # sww files don't have to have a geo_ref
2273        try:
2274            geo_reference = Geo_reference(NetCDFObject=fid)
2275        except AttributeError, e:
2276            geo_reference = Geo_reference() #Default georef object
2277
2278        xllcorner = geo_reference.get_xllcorner()
2279        yllcorner = geo_reference.get_yllcorner()
2280        zone = geo_reference.get_zone()
2281    else:
2282        zone = origin[0]
2283        xllcorner = origin[1]
2284        yllcorner = origin[2]
2285
2286
2287
2288    # FIXME: Refactor using code from file_function.statistics
2289    # Something like print swwstats(swwname)
2290    if verbose:
2291        x = fid.variables['x'][:]
2292        y = fid.variables['y'][:]
2293        times = fid.variables['time'][:]
2294        print '------------------------------------------------'
2295        print 'Statistics of SWW file:'
2296        print '  Name: %s' %swwfile
2297        print '  Reference:'
2298        print '    Lower left corner: [%f, %f]'\
2299              %(xllcorner, yllcorner)
2300        print '    Start time: %f' %fid.starttime[0]
2301        print '  Extent:'
2302        print '    x [m] in [%f, %f], len(x) == %d'\
2303              %(min(x.flat), max(x.flat), len(x.flat))
2304        print '    y [m] in [%f, %f], len(y) == %d'\
2305              %(min(y.flat), max(y.flat), len(y.flat))
2306        print '    t [s] in [%f, %f], len(t) == %d'\
2307              %(min(times), max(times), len(times))
2308        print '  Quantities [SI units]:'
2309        for name in ['stage', 'xmomentum', 'ymomentum', 'elevation']:
2310            q = fid.variables[name][:].flat
2311            print '    %s in [%f, %f]' %(name, min(q), max(q))
2312
2313
2314
2315    # Get quantity and reduce if applicable
2316    if verbose: print 'Processing quantity %s' %quantity
2317
2318    # Turn NetCDF objects into Numeric arrays
2319    quantity_dict = {}
2320    for name in fid.variables.keys():
2321        quantity_dict[name] = fid.variables[name][:]
2322
2323
2324
2325    # Convert quantity expression to quantities found in sww file   
2326    q = apply_expression_to_dictionary(quantity, quantity_dict)
2327
2328
2329
2330    if len(q.shape) == 2:
2331        # q has a time component and needs to be reduced along
2332        # the temporal dimension
2333        if verbose: print 'Reducing quantity %s' %quantity
2334        q_reduced = zeros( number_of_points, Float )
2335
2336        for k in range(number_of_points):
2337            q_reduced[k] = reduction( q[:,k] )
2338
2339        q = q_reduced
2340
2341    # Post condition: Now q has dimension: number_of_points
2342    assert len(q.shape) == 1
2343    assert q.shape[0] == number_of_points
2344
2345
2346    if verbose:
2347        print 'Processed values for %s are in [%f, %f]' %(quantity, min(q), max(q))
2348
2349
2350    # Create grid and update xll/yll corner and x,y
2351    vertex_points = concatenate ((x[:, NewAxis] ,y[:, NewAxis]), axis = 1)
2352    assert len(vertex_points.shape) == 2
2353
2354    # Interpolate
2355    from anuga.fit_interpolate.interpolate import Interpolate
2356    interp = Interpolate(vertex_points, volumes, verbose = verbose)
2357
2358    # Interpolate using quantity values
2359    if verbose: print 'Interpolating'
2360    interpolated_values = interp.interpolate(q, data_points).flat
2361
2362
2363    if verbose:
2364        print 'Interpolated values are in [%f, %f]' %(min(interpolated_values),
2365                                                      max(interpolated_values))
2366
2367    # Assign NODATA_value to all points outside bounding polygon (from interpolation mesh)
2368    P = interp.mesh.get_boundary_polygon()
2369    outside_indices = outside_polygon(data_points, P, closed=True)
2370
2371    for i in outside_indices:
2372        interpolated_values[i] = NODATA_value
2373
2374
2375    # Store results   
2376    G = Geospatial_data(data_points=data_points,
2377                        attributes=interpolated_values)
2378
2379    G.export_points_file(ptsfile, absolute = True)
2380
2381    fid.close()
2382
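# Hedged usage sketch for sww2pts: sample the maximum stage at two
# illustrative gauge locations (absolute UTM coordinates) from 'runup.sww'.
def _example_sww2pts():
    gauges = [[380000.0, 6430000.0],
              [380500.0, 6430500.0]]
    sww2pts('runup',
            basename_out='runup_gauges',
            data_points=gauges,
            quantity='stage',
            reduction=max)
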
2383
2384def convert_dem_from_ascii2netcdf(basename_in, basename_out = None,
2385                                  use_cache = False,
2386                                  verbose = False):
2387    """Read Digitial Elevation model from the following ASCII format (.asc)
2388
2389    Example:
2390
2391    ncols         3121
2392    nrows         1800
2393    xllcorner     722000
2394    yllcorner     5893000
2395    cellsize      25
2396    NODATA_value  -9999
2397    138.3698 137.4194 136.5062 135.5558 ..........
2398
2399    Convert basename_in + '.asc' to NetCDF format (.dem)
2400    mimicking the ASCII format closely.
2401
2402
2403    An accompanying file with same basename_in but extension .prj must exist
2404    and is used to fix the UTM zone, datum, false northings and eastings.
2405
2406    The prj format is assumed to be as
2407
2408    Projection    UTM
2409    Zone          56
2410    Datum         WGS84
2411    Zunits        NO
2412    Units         METERS
2413    Spheroid      WGS84
2414    Xshift        0.0000000000
2415    Yshift        10000000.0000000000
2416    Parameters
2417    """
2418
2419
2420
2421    kwargs = {'basename_out': basename_out, 'verbose': verbose}
2422
2423    if use_cache is True:
2424        from caching import cache
2425        result = cache(_convert_dem_from_ascii2netcdf, basename_in, kwargs,
2426                       dependencies = [basename_in + '.asc'],
2427                       verbose = verbose)
2428
2429    else:
2430        result = apply(_convert_dem_from_ascii2netcdf, [basename_in], kwargs)
2431
2432    return result
2433
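# Hedged usage sketch: convert 'topo.asc' (with its mandatory 'topo.prj')
# into the NetCDF DEM 'topo.dem', caching the result so that unchanged
# inputs are not reconverted (file names illustrative only).
def _example_dem_conversion():
    convert_dem_from_ascii2netcdf('topo', use_cache=True, verbose=False)
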
2434
2435
2436
2437
2438
2439def _convert_dem_from_ascii2netcdf(basename_in, basename_out = None,
2440                                  verbose = False):
2441    """Read Digitial Elevation model from the following ASCII format (.asc)
2442
2443    Internal function. See public function convert_dem_from_ascii2netcdf for details.
2444    """
2445
2446    import os
2447    from Scientific.IO.NetCDF import NetCDFFile
2448    from Numeric import Float, array
2449
2450    #root, ext = os.path.splitext(basename_in)
2451    root = basename_in
2452
2453    ###########################################
2454    # Read Meta data
    if verbose: print 'Reading METADATA from %s' %(root + '.prj')
2456    metadatafile = open(root + '.prj')
2457    metalines = metadatafile.readlines()
2458    metadatafile.close()
2459
2460    L = metalines[0].strip().split()
2461    assert L[0].strip().lower() == 'projection'
2462    projection = L[1].strip()                   #TEXT
2463
2464    L = metalines[1].strip().split()
2465    assert L[0].strip().lower() == 'zone'
2466    zone = int(L[1].strip())
2467
2468    L = metalines[2].strip().split()
2469    assert L[0].strip().lower() == 'datum'
2470    datum = L[1].strip()                        #TEXT
2471
2472    L = metalines[3].strip().split()
2473    assert L[0].strip().lower() == 'zunits'     #IGNORE
2474    zunits = L[1].strip()                       #TEXT
2475
2476    L = metalines[4].strip().split()
2477    assert L[0].strip().lower() == 'units'
2478    units = L[1].strip()                        #TEXT
2479
2480    L = metalines[5].strip().split()
2481    assert L[0].strip().lower() == 'spheroid'   #IGNORE
2482    spheroid = L[1].strip()                     #TEXT
2483
2484    L = metalines[6].strip().split()
2485    assert L[0].strip().lower() == 'xshift'
2486    false_easting = float(L[1].strip())
2487
2488    L = metalines[7].strip().split()
2489    assert L[0].strip().lower() == 'yshift'
2490    false_northing = float(L[1].strip())
2491
2492    #print false_easting, false_northing, zone, datum
2493
2494
2495    ###########################################
2496    #Read DEM data
2497
2498    datafile = open(basename_in + '.asc')
2499
2500    if verbose: print 'Reading DEM from %s' %(basename_in + '.asc')
2501    lines = datafile.readlines()
2502    datafile.close()
2503
2504    if verbose: print 'Got', len(lines), ' lines'
2505
2506    ncols = int(lines[0].split()[1].strip())
2507    nrows = int(lines[1].split()[1].strip())
2508    xllcorner = float(lines[2].split()[1].strip())
2509    yllcorner = float(lines[3].split()[1].strip())
2510    cellsize = float(lines[4].split()[1].strip())
2511    NODATA_value = int(lines[5].split()[1].strip())
2512
2513    assert len(lines) == nrows + 6
2514
2515
2516    ##########################################
2517
2518
    if basename_out is None:
2520        netcdfname = root + '.dem'
2521    else:
2522        netcdfname = basename_out + '.dem'
2523
2524    if verbose: print 'Store to NetCDF file %s' %netcdfname
2525    # NetCDF file definition
2526    fid = NetCDFFile(netcdfname, 'w')
2527
2528    #Create new file
2529    fid.institution = 'Geoscience Australia'
2530    fid.description = 'NetCDF DEM format for compact and portable storage ' +\
2531                      'of spatial point data'
2532
2533    fid.ncols = ncols
2534    fid.nrows = nrows
2535    fid.xllcorner = xllcorner
2536    fid.yllcorner = yllcorner
2537    fid.cellsize = cellsize
2538    fid.NODATA_value = NODATA_value
2539
2540    fid.zone = zone
2541    fid.false_easting = false_easting
2542    fid.false_northing = false_northing
2543    fid.projection = projection
2544    fid.datum = datum
2545    fid.units = units
2546
2547
2548    # dimension definitions
2549    fid.createDimension('number_of_rows', nrows)
2550    fid.createDimension('number_of_columns', ncols)
2551
2552    # variable definitions
2553    fid.createVariable('elevation', Float, ('number_of_rows',
2554                                            'number_of_columns'))
2555
2556    # Get handles to the variables
2557    elevation = fid.variables['elevation']
2558
2559    #Store data
2560    n = len(lines[6:])
2561    for i, line in enumerate(lines[6:]):
2562        fields = line.split()
2563        if verbose and i%((n+10)/10)==0:
2564            print 'Processing row %d of %d' %(i, nrows)
2565
2566        elevation[i, :] = array([float(x) for x in fields])
2567
2568    fid.close()
2569
2570
2571
2572
2573
2574def ferret2sww(basename_in, basename_out = None,
2575               verbose = False,
2576               minlat = None, maxlat = None,
2577               minlon = None, maxlon = None,
2578               mint = None, maxt = None, mean_stage = 0,
2579               origin = None, zscale = 1,
2580               fail_on_NaN = True,
2581               NaN_filler = 0,
2582               elevation = None,
               inverted_bathymetry = True):
    #FIXME: Bathymetry should be obtained from MOST somehow,
    #alternatively from elsewhere or, as a last resort, specified here.
    #The value of -100 will work for the Wollongong tsunami scenario
    #but is very hacky.
2592    """Convert MOST and 'Ferret' NetCDF format for wave propagation to
2593    sww format native to abstract_2d_finite_volumes.
2594
2595    Specify only basename_in and read files of the form
2596    basefilename_ha.nc, basefilename_ua.nc, basefilename_va.nc containing
2597    relative height, x-velocity and y-velocity, respectively.
2598
2599    Also convert latitude and longitude to UTM. All coordinates are
2600    assumed to be given in the GDA94 datum.
2601
    min's and max's: If omitted, the full extent is used.
    To include a value min may equal it, while max must exceed it.
    Lat and lon are assumed to be in decimal degrees.
2605
2606    origin is a 3-tuple with geo referenced
2607    UTM coordinates (zone, easting, northing)
2608
2609    nc format has values organised as HA[TIME, LATITUDE, LONGITUDE]
2610    which means that longitude is the fastest
2611    varying dimension (row major order, so to speak)
2612
2613    ferret2sww uses grid points as vertices in a triangular grid
2614    counting vertices from lower left corner upwards, then right
2615    """
2616
2617    import os
2618    from Scientific.IO.NetCDF import NetCDFFile
2619    from Numeric import Float, Int, Int32, searchsorted, zeros, array
2620    from Numeric import allclose, around
2621
2622    precision = Float
2623
2624    msg = 'Must use latitudes and longitudes for minlat, maxlon etc'
2625
    if minlat is not None:
        assert -90 < minlat < 90 , msg
    if maxlat is not None:
        assert -90 < maxlat < 90 , msg
        if minlat is not None:
            assert maxlat > minlat
    if minlon is not None:
        assert -180 < minlon < 180 , msg
    if maxlon is not None:
        assert -180 < maxlon < 180 , msg
        if minlon is not None:
            assert maxlon > minlon
2638       
2639
2640
2641    #Get NetCDF data
2642    if verbose: print 'Reading files %s_*.nc' %basename_in
2643    #print "basename_in + '_ha.nc'",basename_in + '_ha.nc'
2644    file_h = NetCDFFile(basename_in + '_ha.nc', 'r') #Wave amplitude (cm)
2645    file_u = NetCDFFile(basename_in + '_ua.nc', 'r') #Velocity (x) (cm/s)
2646    file_v = NetCDFFile(basename_in + '_va.nc', 'r') #Velocity (y) (cm/s)
2647    file_e = NetCDFFile(basename_in + '_e.nc', 'r')  #Elevation (z) (m)
2648
2649    if basename_out is None:
2650        swwname = basename_in + '.sww'
2651    else:
2652        swwname = basename_out + '.sww'
2653
2654    # Get dimensions of file_h
2655    for dimension in file_h.dimensions.keys():
2656        if dimension[:3] == 'LON':
2657            dim_h_longitude = dimension
2658        if dimension[:3] == 'LAT':
2659            dim_h_latitude = dimension
2660        if dimension[:4] == 'TIME':
2661            dim_h_time = dimension
2662
2666
2667    times = file_h.variables[dim_h_time]
2668    latitudes = file_h.variables[dim_h_latitude]
2669    longitudes = file_h.variables[dim_h_longitude]
2670   
2671    # get dimensions for file_e
2672    for dimension in file_e.dimensions.keys():
2673        if dimension[:3] == 'LON':
2674            dim_e_longitude = dimension
2675        if dimension[:3] == 'LAT':
2676            dim_e_latitude = dimension
2677
2678    # get dimensions for file_u
2679    for dimension in file_u.dimensions.keys():
2680        if dimension[:3] == 'LON':
2681            dim_u_longitude = dimension
2682        if dimension[:3] == 'LAT':
2683            dim_u_latitude = dimension
2684        if dimension[:4] == 'TIME':
2685            dim_u_time = dimension
2686           
2687    # get dimensions for file_v
2688    for dimension in file_v.dimensions.keys():
2689        if dimension[:3] == 'LON':
2690            dim_v_longitude = dimension
2691        if dimension[:3] == 'LAT':
2692            dim_v_latitude = dimension
2693        if dimension[:4] == 'TIME':
2694            dim_v_time = dimension
2695
2696
2697    # Precision used by most for lat/lon is 4 or 5 decimals
2698    e_lat = around(file_e.variables[dim_e_latitude][:], 5)
2699    e_lon = around(file_e.variables[dim_e_longitude][:], 5)
2700
2701    # Check that files are compatible
2702    assert allclose(latitudes, file_u.variables[dim_u_latitude])
2703    assert allclose(latitudes, file_v.variables[dim_v_latitude])
2704    assert allclose(latitudes, e_lat)
2705
2706    assert allclose(longitudes, file_u.variables[dim_u_longitude])
2707    assert allclose(longitudes, file_v.variables[dim_v_longitude])
2708    assert allclose(longitudes, e_lon)
2709
2710    if mint is None:
2711        jmin = 0
2712        mint = times[0]       
2713    else:
2714        jmin = searchsorted(times, mint)
2715       
2716    if maxt is None:
2717        jmax = len(times)
2718        maxt = times[-1]
2719    else:
2720        jmax = searchsorted(times, maxt)
2721
2722    #print "latitudes[:]",latitudes[:]
2723    #print "longitudes[:]",longitudes [:]
2724    kmin, kmax, lmin, lmax = _get_min_max_indexes(latitudes[:],
2725                                                  longitudes[:],
2726                                                  minlat, maxlat,
2727                                                  minlon, maxlon)
2728
2729
2730    times = times[jmin:jmax]
2731    latitudes = latitudes[kmin:kmax]
2732    longitudes = longitudes[lmin:lmax]
2733
2734    #print "latitudes[:]",latitudes[:]
2735    #print "longitudes[:]",longitudes [:]
2736
2737    if verbose: print 'cropping'
2738    zname = 'ELEVATION'
2739
2740    amplitudes = file_h.variables['HA'][jmin:jmax, kmin:kmax, lmin:lmax]
2741    uspeed = file_u.variables['UA'][jmin:jmax, kmin:kmax, lmin:lmax] #Lon
2742    vspeed = file_v.variables['VA'][jmin:jmax, kmin:kmax, lmin:lmax] #Lat
2743    elevations = file_e.variables[zname][kmin:kmax, lmin:lmax]
2744
2760
2761    #Get missing values
2762    nan_ha = file_h.variables['HA'].missing_value[0]
2763    nan_ua = file_u.variables['UA'].missing_value[0]
2764    nan_va = file_v.variables['VA'].missing_value[0]
2765    if hasattr(file_e.variables[zname],'missing_value'):
2766        nan_e  = file_e.variables[zname].missing_value[0]
2767    else:
2768        nan_e = None
2769
2770    #Cleanup
2771    from Numeric import sometrue
2772
2773    missing = (amplitudes == nan_ha)
2774    if sometrue (missing):
2775        if fail_on_NaN:
2776            msg = 'NetCDFFile %s contains missing values'\
2777                  %(basename_in+'_ha.nc')
2778            raise DataMissingValuesError, msg
2779        else:
2780            amplitudes = amplitudes*(missing==0) + missing*NaN_filler
2781
2782    missing = (uspeed == nan_ua)
2783    if sometrue (missing):
2784        if fail_on_NaN:
2785            msg = 'NetCDFFile %s contains missing values'\
2786                  %(basename_in+'_ua.nc')
2787            raise DataMissingValuesError, msg
2788        else:
2789            uspeed = uspeed*(missing==0) + missing*NaN_filler
2790
2791    missing = (vspeed == nan_va)
2792    if sometrue (missing):
2793        if fail_on_NaN:
2794            msg = 'NetCDFFile %s contains missing values'\
2795                  %(basename_in+'_va.nc')
2796            raise DataMissingValuesError, msg
2797        else:
2798            vspeed = vspeed*(missing==0) + missing*NaN_filler
2799
2800
2801    missing = (elevations == nan_e)
2802    if sometrue (missing):
2803        if fail_on_NaN:
2804            msg = 'NetCDFFile %s contains missing values'\
2805                  %(basename_in+'_e.nc')
2806            raise DataMissingValuesError, msg
2807        else:
2808            elevations = elevations*(missing==0) + missing*NaN_filler
2809
2810    #######
2811
2812
2813
2814    number_of_times = times.shape[0]
2815    number_of_latitudes = latitudes.shape[0]
2816    number_of_longitudes = longitudes.shape[0]
2817
2818    assert amplitudes.shape[0] == number_of_times
2819    assert amplitudes.shape[1] == number_of_latitudes
2820    assert amplitudes.shape[2] == number_of_longitudes
2821
2822    if verbose:
2823        print '------------------------------------------------'
2824        print 'Statistics:'
2825        print '  Extent (lat/lon):'
2826        print '    lat in [%f, %f], len(lat) == %d'\
2827              %(min(latitudes.flat), max(latitudes.flat),
2828                len(latitudes.flat))
2829        print '    lon in [%f, %f], len(lon) == %d'\
2830              %(min(longitudes.flat), max(longitudes.flat),
2831                len(longitudes.flat))
2832        print '    t in [%f, %f], len(t) == %d'\
2833              %(min(times.flat), max(times.flat), len(times.flat))
2834
        q = amplitudes.flat
        name = 'Amplitudes (ha) [cm]'
        print '    %s in [%f, %f]' %(name, min(q), max(q))

        q = uspeed.flat
        name = 'Speeds (ua) [cm/s]'
        print '    %s in [%f, %f]' %(name, min(q), max(q))

        q = vspeed.flat
        name = 'Speeds (va) [cm/s]'
        print '    %s in [%f, %f]' %(name, min(q), max(q))

        q = elevations.flat
        name = 'Elevations (e) [m]'
        print '    %s in [%f, %f]' %(name, min(q), max(q))
2850
2851
2853    number_of_points = number_of_latitudes*number_of_longitudes
2854    number_of_volumes = (number_of_latitudes-1)*(number_of_longitudes-1)*2
2855
2856
2857    file_h.close()
2858    file_u.close()
2859    file_v.close()
2860    file_e.close()
2861
2862
2863    # NetCDF file definition
2864    outfile = NetCDFFile(swwname, 'w')
2865
2866    description = 'Converted from Ferret files: %s, %s, %s, %s'\
2867                  %(basename_in + '_ha.nc',
2868                    basename_in + '_ua.nc',
2869                    basename_in + '_va.nc',
2870                    basename_in + '_e.nc')
2871   
2872    #Create new file
2873    starttime = times[0]
2874    sww = Write_sww()
2875    sww.header(outfile, times, number_of_volumes,
2876                         number_of_points, description=description,
2877                         verbose=verbose)
2878
2879    #Store
2880    from anuga.coordinate_transforms.redfearn import redfearn
2881    x = zeros(number_of_points, Float)  #Easting
2882    y = zeros(number_of_points, Float)  #Northing
2883
2884
2885    if verbose: print 'Making triangular grid'
2886    #Check zone boundaries
2887    refzone, _, _ = redfearn(latitudes[0],longitudes[0])
2888
2889    vertices = {}
2890    i = 0
2891    for k, lat in enumerate(latitudes):       #Y direction
2892        for l, lon in enumerate(longitudes):  #X direction
2893
2894            vertices[l,k] = i
2895
2896            zone, easting, northing = redfearn(lat,lon)
2897
            msg = 'Zone boundary crossed at longitude = %f' %lon
            #assert zone == refzone, msg
2901            x[i] = easting
2902            y[i] = northing
2903            i += 1
2904
2905    #Construct 2 triangles per 'rectangular' element
2906    volumes = []
2907    for l in range(number_of_longitudes-1):    #X direction
2908        for k in range(number_of_latitudes-1): #Y direction
2909            v1 = vertices[l,k+1]
2910            v2 = vertices[l,k]
2911            v3 = vertices[l+1,k+1]
2912            v4 = vertices[l+1,k]
2913
2914            volumes.append([v1,v2,v3]) #Upper element
2915            volumes.append([v4,v3,v2]) #Lower element
2916
2917    volumes = array(volumes)
2918
2919    if origin is None:
2920        origin = Geo_reference(refzone,min(x),min(y))
2921    geo_ref = write_NetCDF_georeference(origin, outfile)
2922   
2923    if elevation is not None:
2924        z = elevation
2925    else:
2926        if inverted_bathymetry:
2927            z = -1*elevations
2928        else:
2929            z = elevations
2930    #FIXME: z should be obtained from MOST and passed in here
2931
2932    from Numeric import resize
2933    z = resize(z,outfile.variables['z'][:].shape)
2934    outfile.variables['x'][:] = x - geo_ref.get_xllcorner()
2935    outfile.variables['y'][:] = y - geo_ref.get_yllcorner()
    outfile.variables['z'][:] = z             #FIXME HACK for backwards compat.
2937    outfile.variables['elevation'][:] = z
2938    outfile.variables['volumes'][:] = volumes.astype(Int32) #For Opteron 64
2939
2940
2941
2942    #Time stepping
2943    stage = outfile.variables['stage']
2944    xmomentum = outfile.variables['xmomentum']
2945    ymomentum = outfile.variables['ymomentum']
2946
2947    if verbose: print 'Converting quantities'
2948    n = len(times)
2949    for j in range(n):
2950        if verbose and j%((n+10)/10)==0: print '  Doing %d of %d' %(j, n)
2951        i = 0
2952        for k in range(number_of_latitudes):      #Y direction
2953            for l in range(number_of_longitudes): #X direction
2954                w = zscale*amplitudes[j,k,l]/100 + mean_stage
2955                stage[j,i] = w
2956                h = w - z[i]
2957                xmomentum[j,i] = uspeed[j,k,l]/100*h
2958                ymomentum[j,i] = vspeed[j,k,l]/100*h
2959                i += 1
2960
2962
2963    #FIXME: Refactor using code from file_function.statistics
2964    #Something like print swwstats(swwname)
2965    if verbose:
2966        x = outfile.variables['x'][:]
2967        y = outfile.variables['y'][:]
2968        print '------------------------------------------------'
2969        print 'Statistics of output file:'
2970        print '  Name: %s' %swwname
2971        print '  Reference:'
2972        print '    Lower left corner: [%f, %f]'\
2973              %(geo_ref.get_xllcorner(), geo_ref.get_yllcorner())
        print '    Start time: %f' %starttime
2975        print '    Min time: %f' %mint
2976        print '    Max time: %f' %maxt
2977        print '  Extent:'
2978        print '    x [m] in [%f, %f], len(x) == %d'\
2979              %(min(x.flat), max(x.flat), len(x.flat))
2980        print '    y [m] in [%f, %f], len(y) == %d'\
2981              %(min(y.flat), max(y.flat), len(y.flat))
2982        print '    t [s] in [%f, %f], len(t) == %d'\
2983              %(min(times), max(times), len(times))
2984        print '  Quantities [SI units]:'
2985        for name in ['stage', 'xmomentum', 'ymomentum', 'elevation']:
2986            q = outfile.variables[name][:].flat
2987            print '    %s in [%f, %f]' %(name, min(q), max(q))
2988
2989
2990
2991    outfile.close()
2992
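# Hedged usage sketch for ferret2sww: convert MOST files 'most_ha.nc',
# 'most_ua.nc', 'most_va.nc' and 'most_e.nc' (illustrative basename) to
# 'most.sww', cropped to a lat/lon window given in decimal degrees.
def _example_ferret2sww():
    ferret2sww('most',
               minlat=-35.0, maxlat=-34.0,
               minlon=150.0, maxlon=151.0,
               mean_stage=0.0,
               zscale=1,
               verbose=False)
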
2993
2994
2995
2996
2997def timefile2netcdf(filename, quantity_names=None, time_as_seconds=False):
2998    """Template for converting typical text files with time series to
2999    NetCDF tms file.
3000
3001
3002    The file format is assumed to be either two fields separated by a comma:
3003
3004        time [DD/MM/YY hh:mm:ss], value0 value1 value2 ...
3005
3006    E.g
3007
3008      31/08/04 00:00:00, 1.328223 0 0
3009      31/08/04 00:15:00, 1.292912 0 0
3010
3011    or time (seconds), value0 value1 value2 ...
3012   
3013      0.0, 1.328223 0 0
3014      0.1, 1.292912 0 0
3015
3016    will provide a time dependent function f(t) with three attributes
3017
    filename is assumed to be the rootname: input is read from
    filename + '.txt' and output is written to filename + '.tms'.
3019    """
3020
3021    import time, calendar
3022    from Numeric import array
3023    from anuga.config import time_format
3024    from anuga.utilities.numerical_tools import ensure_numeric
3025
3026
3027
3028    fid = open(filename + '.txt')
3029    line = fid.readline()
3030    fid.close()
3031
3032    fields = line.split(',')
    msg = 'File %s must have the format: time, value0 value1 value2 ...' %filename
3034    assert len(fields) == 2, msg
3035
3036    if not time_as_seconds:
3037        try:
3038            starttime = calendar.timegm(time.strptime(fields[0], time_format))
3039        except ValueError:
3040            msg = 'First field in file %s must be' %filename
3041            msg += ' date-time with format %s.\n' %time_format
3042            msg += 'I got %s instead.' %fields[0]
3043            raise DataTimeError, msg
    else:
        try:
            starttime = float(fields[0])
        except ValueError:
            msg = 'First field in file %s must be a time' %filename
            msg += ' in seconds. I got %s instead.' %fields[0]
            raise DataTimeError, msg
3050
3051
3052    #Split values
3053    values = []
3054    for value in fields[1].split():
3055        values.append(float(value))
3056
3057    q = ensure_numeric(values)
3058
3059    msg = 'ERROR: File must contain at least one independent value'
3060    assert len(q.shape) == 1, msg
3061
3062
3063
3064    #Read times proper
3065    from Numeric import zeros, Float, alltrue
3066    from anuga.config import time_format
3067    import time, calendar
3068
3069    fid = open(filename + '.txt')
3070    lines = fid.readlines()
3071    fid.close()
3072
3073    N = len(lines)
3074    d = len(q)
3075
3076    T = zeros(N, Float)       #Time
3077    Q = zeros((N, d), Float)  #Values
3078
3079    for i, line in enumerate(lines):
3080        fields = line.split(',')
3081        if not time_as_seconds:
3082            realtime = calendar.timegm(time.strptime(fields[0], time_format))
3083        else:
            realtime = float(fields[0])
3085        T[i] = realtime - starttime
3086
3087        for j, value in enumerate(fields[1].split()):
3088            Q[i, j] = float(value)
3089
    msg = 'File %s must list time as a monotonically ' %filename
3091    msg += 'increasing sequence'
3092    assert alltrue( T[1:] - T[:-1] > 0 ), msg
3093
3094    #Create NetCDF file
3095    from Scientific.IO.NetCDF import NetCDFFile
3096
3097    fid = NetCDFFile(filename + '.tms', 'w')
3098
3099
3100    fid.institution = 'Geoscience Australia'
3101    fid.description = 'Time series'
3102
3103
3104    #Reference point
3105    #Start time in seconds since the epoch (midnight 1/1/1970)
3106    #FIXME: Use Georef
3107    fid.starttime = starttime
3108
3109    # dimension definitions
3112
3113
3114    fid.createDimension('number_of_timesteps', len(T))
3115
3116    fid.createVariable('time', Float, ('number_of_timesteps',))
3117
3118    fid.variables['time'][:] = T
3119
3120    for i in range(Q.shape[1]):
        try:
            name = quantity_names[i]
        except (TypeError, IndexError):
            name = 'Attribute%d' %i
3125
3126        fid.createVariable(name, Float, ('number_of_timesteps',))
3127        fid.variables[name][:] = Q[:,i]
3128
3129    fid.close()
3130
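# Hedged usage sketch for timefile2netcdf: write a two-line series in the
# seconds format described above and convert it to 'gauge.tms'
# (file name and values illustrative only).
def _example_timefile2netcdf():
    fid = open('gauge.txt', 'w')
    fid.write('0.0, 1.328223 0 0\n')
    fid.write('60.0, 1.292912 0 0\n')
    fid.close()
    timefile2netcdf('gauge',
                    quantity_names=['stage', 'xmomentum', 'ymomentum'],
                    time_as_seconds=True)
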
3131
3132def extent_sww(file_name):
3133    """
3134    Read in an sww file.
3135
3136    Input;
3137    file_name - the sww file
3138
3139    Output;
3140    z - Vector of bed elevation
3141    volumes - Array.  Each row has 3 values, representing
3142    the vertices that define the volume
3143    time - Vector of the times where there is stage information
3144    stage - array with respect to time and vertices (x,y)
3145    """
3146
3147
3148    from Scientific.IO.NetCDF import NetCDFFile
3149
3150    #Check contents
3151    #Get NetCDF
3152    fid = NetCDFFile(file_name, 'r')
3153
3154    # Get the variables
3155    x = fid.variables['x'][:]
3156    y = fid.variables['y'][:]
3157    stage = fid.variables['stage'][:]
3158    #print "stage",stage
3159    #print "stage.shap",stage.shape
3160    #print "min(stage.flat), mpythonax(stage.flat)",min(stage.flat), max(stage.flat)
3161    #print "min(stage)",min(stage)
3162
3163    fid.close()
3164
3165    return [min(x),max(x),min(y),max(y),min(stage.flat),max(stage.flat)]
3166
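# Hedged sketch: report the bounding box and stage range of an sww file
# ('runup.sww' is illustrative).
def _example_extent_sww():
    xmin, xmax, ymin, ymax, stage_min, stage_max = extent_sww('runup.sww')
    print 'x in [%f, %f], y in [%f, %f]' %(xmin, xmax, ymin, ymax)
    print 'stage in [%f, %f]' %(stage_min, stage_max)
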
3167
def sww2domain(filename, boundary=None, t=None,
               fail_if_NaN=True, NaN_filler=0,
               verbose=False, very_verbose=False):
    """
    Usage: domain = sww2domain('file.sww', t=time (default = last time in file))

    Boundary is not recommended if domain.smooth is not selected, as it
    uses unique coordinates, but not unique boundaries. This means that
    the boundary file will not be compatible with the coordinates, and will
    give a different final boundary, or crash.
    """
    NaN = 9.969209968386869e+036  # Initialise NaN
3181
3182    from Scientific.IO.NetCDF import NetCDFFile
3183    from shallow_water import Domain
3184    from Numeric import asarray, transpose, resize
3185
3186    if verbose: print 'Reading from ', filename
3187    fid = NetCDFFile(filename, 'r')    #Open existing file for read
3188    time = fid.variables['time']       #Timesteps
3189    if t is None:
3190        t = time[-1]
3191    time_interp = get_time_interp(time,t)
3192
3193    # Get the variables as Numeric arrays
3194    x = fid.variables['x'][:]             #x-coordinates of vertices
3195    y = fid.variables['y'][:]             #y-coordinates of vertices
3196    elevation = fid.variables['elevation']     #Elevation
3197    stage = fid.variables['stage']     #Water level
3198    xmomentum = fid.variables['xmomentum']   #Momentum in the x-direction
3199    ymomentum = fid.variables['ymomentum']   #Momentum in the y-direction
3200
3201    starttime = fid.starttime[0]
3202    volumes = fid.variables['volumes'][:] #Connectivity
3203    coordinates=transpose(asarray([x.tolist(),y.tolist()]))
3204
3205    conserved_quantities = []
3206    interpolated_quantities = {}
3207    other_quantities = []
3208
3209    # get geo_reference
3210    #sww files don't have to have a geo_ref
3211    try:
3212        geo_reference = Geo_reference(NetCDFObject=fid)
3213    except: #AttributeError, e:
3214        geo_reference = None
3215
3216    if verbose: print '    getting quantities'
3217    for quantity in fid.variables.keys():
3218        dimensions = fid.variables[quantity].dimensions
3219        if 'number_of_timesteps' in dimensions:
3220            conserved_quantities.append(quantity)
3221            interpolated_quantities[quantity]=\
3222                  interpolated_quantity(fid.variables[quantity][:],time_interp)
3223        else: other_quantities.append(quantity)
3224
3225    other_quantities.remove('x')
3226    other_quantities.remove('y')
3227    other_quantities.remove('z')
3228    other_quantities.remove('volumes')
3229    try:
3230        other_quantities.remove('stage_range')
3231        other_quantities.remove('xmomentum_range')
3232        other_quantities.remove('ymomentum_range')
3233        other_quantities.remove('elevation_range')
3234    except:
3235        pass
3236       
3237
3238    conserved_quantities.remove('time')
3239
3240    if verbose: print '    building domain'
3241    #    From domain.Domain:
3242    #    domain = Domain(coordinates, volumes,\
3243    #                    conserved_quantities = conserved_quantities,\
3244    #                    other_quantities = other_quantities,zone=zone,\
3245    #                    xllcorner=xllcorner, yllcorner=yllcorner)
3246
3247    #   From shallow_water.Domain:
3248    coordinates=coordinates.tolist()
3249    volumes=volumes.tolist()
3250    #FIXME:should this be in mesh?(peter row)
3251    if fid.smoothing == 'Yes': unique = False
3252    else: unique = True
3253    if unique:
3254        coordinates,volumes,boundary=weed(coordinates,volumes,boundary)
3255
3256
3257    try:
3258        domain = Domain(coordinates, volumes, boundary)
3259    except AssertionError, e:
3260        fid.close()
3261        msg = 'Domain could not be created: %s. Perhaps use "fail_if_NaN=False and NaN_filler = ..."' %e
3262        raise DataDomainError, msg
3263
3264    if not boundary is None:
3265        domain.boundary = boundary
3266
3267    domain.geo_reference = geo_reference
3268
3269    domain.starttime=float(starttime)+float(t)
3270    domain.time=0.0
3271
3272    for quantity in other_quantities:
3273        try:
3274            NaN = fid.variables[quantity].missing_value
3275        except:
3276            pass #quantity has no missing_value number
3277        X = fid.variables[quantity][:]
3278        if very_verbose:
3279            print '       ',quantity
3280            print '        NaN =',NaN
3281            print '        max(X)'
3282            print '       ',max(X)
3283            print '        max(X)==NaN'
3284            print '       ',max(X)==NaN
3285            print ''
3286        if (max(X)==NaN) or (min(X)==NaN):
3287            if fail_if_NaN:
3288                msg = 'quantity "%s" contains no_data entry'%quantity
3289                raise DataMissingValuesError, msg
3290            else:
                data = (X != NaN)
3292                X = (X*data)+(data==0)*NaN_filler
3293        if unique:
3294            X = resize(X,(len(X)/3,3))
3295        domain.set_quantity(quantity,X)
3296    #
3297    for quantity in conserved_quantities:
3298        try:
3299            NaN = fid.variables[quantity].missing_value
3300        except:
3301            pass #quantity has no missing_value number
3302        X = interpolated_quantities[quantity]
3303        if very_verbose:
3304            print '       ',quantity
3305            print '        NaN =',NaN
3306            print '        max(X)'
3307            print '       ',max(X)
3308            print '        max(X)==NaN'
3309            print '       ',max(X)==NaN
3310            print ''
3311        if (max(X)==NaN) or (min(X)==NaN):
3312            if fail_if_NaN:
3313                msg = 'quantity "%s" contains no_data entry'%quantity
3314                raise DataMissingValuesError, msg
3315            else:
                data = (X != NaN)
3317                X = (X*data)+(data==0)*NaN_filler
3318        if unique:
3319            X = resize(X,(X.shape[0]/3,3))
3320        domain.set_quantity(quantity,X)
3321
3322    fid.close()
3323    return domain
3324
3325def interpolated_quantity(saved_quantity,time_interp):
3326
3327    #given an index and ratio, interpolate quantity with respect to time.
3328    index,ratio = time_interp
3329    Q = saved_quantity
3330    if ratio > 0:
3331        q = (1-ratio)*Q[index]+ ratio*Q[index+1]
3332    else:
3333        q = Q[index]
3334    #Return vector of interpolated values
3335    return q
3336
3337def get_time_interp(time,t=None):
3338    #Finds the ratio and index for time interpolation.
3339    #It is borrowed from previous abstract_2d_finite_volumes code.
3340    if t is None:
3341        t=time[-1]
3342        index = -1
3343        ratio = 0.
3344    else:
3345        T = time
3346        tau = t
3347        index=0
3348        msg = 'Time interval derived from file %s [%s:%s]'\
3349            %('FIXMEfilename', T[0], T[-1])
3350        msg += ' does not match model time: %s' %tau
3351        if tau < time[0]: raise DataTimeError, msg
3352        if tau > time[-1]: raise DataTimeError, msg
3353        while tau > time[index]: index += 1
3354        while tau < time[index]: index -= 1
3355        if tau == time[index]:
3356            #Protect against case where tau == time[-1] (last time)
3357            # - also works in general when tau == time[i]
3358            ratio = 0
3359        else:
3360            #t is now between index and index+1
3361            ratio = (tau - time[index])/(time[index+1] - time[index])
3362    return (index,ratio)
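
# Worked example (a sketch): for time = [0., 10., 20.] and t = 15.,
# get_time_interp returns (index, ratio) == (1, 0.5), and
# interpolated_quantity then computes 0.5*Q[1] + 0.5*Q[2],
# i.e. linear interpolation in time.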
3363
3364
3365def weed(coordinates,volumes,boundary = None):
3366    if type(coordinates)==ArrayType:
3367        coordinates = coordinates.tolist()
3368    if type(volumes)==ArrayType:
3369        volumes = volumes.tolist()
3370
3371    unique = False
3372    point_dict = {}
3373    same_point = {}
3374    for i in range(len(coordinates)):
3375        point = tuple(coordinates[i])
3376        if point_dict.has_key(point):
3377            unique = True
3378            same_point[i]=point
3379            #to change all point i references to point j
3380        else:
3381            point_dict[point]=i
3382            same_point[i]=point
3383
3384    coordinates = []
3385    i = 0
3386    for point in point_dict.keys():
3387        point = tuple(point)
3388        coordinates.append(list(point))
3389        point_dict[point]=i
3390        i+=1
3391
3392
3393    for volume in volumes:
3394        for i in range(len(volume)):
3395            index = volume[i]
3396            if index>-1:
3397                volume[i]=point_dict[same_point[index]]
3398
3399    new_boundary = {}
3400    if not boundary is None:
3401        for segment in boundary.keys():
3402            point0 = point_dict[same_point[segment[0]]]
3403            point1 = point_dict[same_point[segment[1]]]
3404            label = boundary[segment]
            #FIXME should the boundary attributes be concatenated
            #('exterior, pond') or replaced ('pond')(peter row)
3407
3408            if new_boundary.has_key((point0,point1)):
3409                new_boundary[(point0,point1)]=new_boundary[(point0,point1)]#\
3410                                              #+','+label
3411
3412            elif new_boundary.has_key((point1,point0)):
3413                new_boundary[(point1,point0)]=new_boundary[(point1,point0)]#\
3414                                              #+','+label
3415            else: new_boundary[(point0,point1)]=label
3416
3417        boundary = new_boundary
3418
3419    return coordinates,volumes,boundary
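
# Example (a sketch; the ordering of the new point indices is arbitrary):
# weed([[0.,0.],[1.,0.],[0.,0.]], [[0,1,2]]) merges the duplicated point,
# so the returned coordinates hold two unique points and the volume
# indices are remapped accordingly, e.g. to [0,1,0].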
3420
3421
3422def decimate_dem(basename_in, stencil, cellsize_new, basename_out=None,
3423                 verbose=False):
3424    """Read Digitial Elevation model from the following NetCDF format (.dem)
3425
3426    Example:
3427
3428    ncols         3121
3429    nrows         1800
3430    xllcorner     722000
3431    yllcorner     5893000
3432    cellsize      25
3433    NODATA_value  -9999
3434    138.3698 137.4194 136.5062 135.5558 ..........
3435
3436    Decimate data to cellsize_new using stencil and write to NetCDF dem format.
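
    Example of use (a sketch; 'topo' is an assumed basename and the 3x3
    averaging stencil assumes cellsize_new is about 3 times cellsize):

        from Numeric import ones, Float
        stencil = ones((3,3), Float)/9.0
        decimate_dem('topo', stencil, cellsize_new=75)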
3437    """
3438
3439    import os
3440    from Scientific.IO.NetCDF import NetCDFFile
3441    from Numeric import Float, zeros, sum, reshape, equal
3442
3443    root = basename_in
3444    inname = root + '.dem'
3445
3446    #Open existing netcdf file to read
3447    infile = NetCDFFile(inname, 'r')
3448    if verbose: print 'Reading DEM from %s' %inname
3449
3450    #Read metadata
3451    ncols = infile.ncols[0]
3452    nrows = infile.nrows[0]
3453    xllcorner = infile.xllcorner[0]
3454    yllcorner = infile.yllcorner[0]
3455    cellsize = infile.cellsize[0]
3456    NODATA_value = infile.NODATA_value[0]
3457    zone = infile.zone[0]
3458    false_easting = infile.false_easting[0]
3459    false_northing = infile.false_northing[0]
3460    projection = infile.projection
3461    datum = infile.datum
3462    units = infile.units
3463
3464    dem_elevation = infile.variables['elevation']
3465
3466    #Get output file name
3467    if basename_out == None:
3468        outname = root + '_' + repr(cellsize_new) + '.dem'
3469    else:
3470        outname = basename_out + '.dem'
3471
3472    if verbose: print 'Write decimated NetCDF file to %s' %outname
3473
3474    #Determine some dimensions for decimated grid
3475    (nrows_stencil, ncols_stencil) = stencil.shape
3476    x_offset = ncols_stencil / 2
3477    y_offset = nrows_stencil / 2
3478    cellsize_ratio = int(cellsize_new / cellsize)
3479    ncols_new = 1 + (ncols - ncols_stencil) / cellsize_ratio
3480    nrows_new = 1 + (nrows - nrows_stencil) / cellsize_ratio
3481
3482    #Open netcdf file for output
3483    outfile = NetCDFFile(outname, 'w')
3484
3485    #Create new file
3486    outfile.institution = 'Geoscience Australia'
3487    outfile.description = 'NetCDF DEM format for compact and portable storage ' +\
3488                           'of spatial point data'
3489    #Georeferencing
3490    outfile.zone = zone
3491    outfile.projection = projection
3492    outfile.datum = datum
3493    outfile.units = units
3494
3495    outfile.cellsize = cellsize_new
3496    outfile.NODATA_value = NODATA_value
3497    outfile.false_easting = false_easting
3498    outfile.false_northing = false_northing
3499
3500    outfile.xllcorner = xllcorner + (x_offset * cellsize)
3501    outfile.yllcorner = yllcorner + (y_offset * cellsize)
3502    outfile.ncols = ncols_new
3503    outfile.nrows = nrows_new
3504
3505    # dimension definition
3506    outfile.createDimension('number_of_points', nrows_new*ncols_new)
3507
3508    # variable definition
3509    outfile.createVariable('elevation', Float, ('number_of_points',))
3510
3511    # Get handle to the variable
3512    elevation = outfile.variables['elevation']
3513
3514    dem_elevation_r = reshape(dem_elevation, (nrows, ncols))
3515
3516    #Store data
3517    global_index = 0
3518    for i in range(nrows_new):
3519        if verbose: print 'Processing row %d of %d' %(i, nrows_new)
3520        lower_index = global_index
3521        telev =  zeros(ncols_new, Float)
3522        local_index = 0
3523        trow = i * cellsize_ratio
3524
3525        for j in range(ncols_new):
3526            tcol = j * cellsize_ratio
3527            tmp = dem_elevation_r[trow:trow+nrows_stencil, tcol:tcol+ncols_stencil]
3528
3529            #if dem contains 1 or more NODATA_values set value in
3530            #decimated dem to NODATA_value, else compute decimated
3531            #value using stencil
3532            if sum(sum(equal(tmp, NODATA_value))) > 0:
3533                telev[local_index] = NODATA_value
3534            else:
3535                telev[local_index] = sum(sum(tmp * stencil))
3536
3537            global_index += 1
3538            local_index += 1
3539
3540        upper_index = global_index
3541
3542        elevation[lower_index:upper_index] = telev
3543
3544    assert global_index == nrows_new*ncols_new, 'index not equal to number of points'
3545
3546    infile.close()
3547    outfile.close()
3548
3549
3550
3551
3552def tsh2sww(filename, verbose=False): #test_tsh2sww
3553    """
3554    to check if a tsh/msh file 'looks' good.
3555    """
3556
3557
    if verbose: print 'Creating domain from', filename
    domain = pmesh_to_domain_instance(filename, Domain)
    if verbose: print "Number of triangles = ", len(domain)
3561
3562    domain.smooth = True
3563    domain.format = 'sww'   #Native netcdf visualisation format
3564    file_path, filename = path.split(filename)
3565    filename, ext = path.splitext(filename)
3566    domain.set_name(filename)   
3567    domain.reduction = mean
3568    if verbose == True:print "file_path",file_path
3569    if file_path == "":file_path = "."
3570    domain.set_datadir(file_path)
3571
    if verbose:
3573        print "Output written to " + domain.get_datadir() + sep + \
3574              domain.get_name() + "." + domain.format
3575    sww = get_dataobject(domain)
3576    sww.store_connectivity()
3577    sww.store_timestep('stage')
3578
3579
3580def asc_csiro2sww(bath_dir,
3581                  elevation_dir,
3582                  ucur_dir,
3583                  vcur_dir,
3584                  sww_file,
3585                  minlat = None, maxlat = None,
3586                  minlon = None, maxlon = None,
3587                  zscale=1,
3588                  mean_stage = 0,
3589                  fail_on_NaN = True,
3590                  elevation_NaN_filler = 0,
3591                  bath_prefix='ba',
3592                  elevation_prefix='el',
3593                  verbose=False):
3594    """
3595    Produce an sww boundary file, from esri ascii data from CSIRO.
3596
3597    Also convert latitude and longitude to UTM. All coordinates are
3598    assumed to be given in the GDA94 datum.
3599
3600    assume:
3601    All files are in esri ascii format
3602
3603    4 types of information
3604    bathymetry
3605    elevation
3606    u velocity
3607    v velocity
3608
3609    Assumptions
3610    The metadata of all the files is the same
    Each type is in a separate directory
    One bath file with extension .000
3613    The time period is less than 24hrs and uniform.
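
    Example of use (a sketch; directory and file names are assumed):

        asc_csiro2sww('bathys', 'elevations', 'ucur', 'vcur',
                      'test.sww', minlat=-34.39, maxlat=-34.37,
                      minlon=150.90, maxlon=150.92, verbose=True)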
3614    """
3615    from Scientific.IO.NetCDF import NetCDFFile
3616
3617    from anuga.coordinate_transforms.redfearn import redfearn
3618
    precision = Float # So if we want to change the precision it's done here
3620
    # Go into the bath dir and load the only file,
3622    bath_files = os.listdir(bath_dir)
3623
3624    bath_file = bath_files[0]
3625    bath_dir_file =  bath_dir + os.sep + bath_file
3626    bath_metadata,bath_grid =  _read_asc(bath_dir_file)
3627
3628    #Use the date.time of the bath file as a basis for
3629    #the start time for other files
3630    base_start = bath_file[-12:]
3631
3632    #go into the elevation dir and load the 000 file
3633    elevation_dir_file = elevation_dir  + os.sep + elevation_prefix \
3634                         + base_start
3635
3636    elevation_files = os.listdir(elevation_dir)
3637    ucur_files = os.listdir(ucur_dir)
3638    vcur_files = os.listdir(vcur_dir)
3639    elevation_files.sort()
3640    # the first elevation file should be the
3641    # file with the same base name as the bath data
3642    assert elevation_files[0] == 'el' + base_start
3643
3644    number_of_latitudes = bath_grid.shape[0]
3645    number_of_longitudes = bath_grid.shape[1]
3646    number_of_volumes = (number_of_latitudes-1)*(number_of_longitudes-1)*2
3647
3648    longitudes = [bath_metadata['xllcorner']+x*bath_metadata['cellsize'] \
3649                  for x in range(number_of_longitudes)]
3650    latitudes = [bath_metadata['yllcorner']+y*bath_metadata['cellsize'] \
3651                 for y in range(number_of_latitudes)]
3652
    # Reverse order of lat, so the first lat represents the first grid row
3654    latitudes.reverse()
3655
3656    kmin, kmax, lmin, lmax = _get_min_max_indexes(latitudes[:],longitudes[:],
3657                                                 minlat=minlat, maxlat=maxlat,
3658                                                 minlon=minlon, maxlon=maxlon)
3659
3660
3661    bath_grid = bath_grid[kmin:kmax,lmin:lmax]
3662    latitudes = latitudes[kmin:kmax]
3663    longitudes = longitudes[lmin:lmax]
3664    number_of_latitudes = len(latitudes)
3665    number_of_longitudes = len(longitudes)
3666    number_of_times = len(os.listdir(elevation_dir))
3667    number_of_points = number_of_latitudes*number_of_longitudes
3668    number_of_volumes = (number_of_latitudes-1)*(number_of_longitudes-1)*2
3669
3670    #Work out the times
3671    if len(elevation_files) > 1:
3672        # Assume: The time period is less than 24hrs.
3673        time_period = (int(elevation_files[1][-3:]) - \
3674                      int(elevation_files[0][-3:]))*60*60
3675        times = [x*time_period for x in range(len(elevation_files))]
3676    else:
3677        times = [0.0]
3678
3679
3680    if verbose:
3681        print '------------------------------------------------'
3682        print 'Statistics:'
3683        print '  Extent (lat/lon):'
3684        print '    lat in [%f, %f], len(lat) == %d'\
3685              %(min(latitudes), max(latitudes),
3686                len(latitudes))
3687        print '    lon in [%f, %f], len(lon) == %d'\
3688              %(min(longitudes), max(longitudes),
3689                len(longitudes))
3690        print '    t in [%f, %f], len(t) == %d'\
3691              %(min(times), max(times), len(times))
3692
3693    ######### WRITE THE SWW FILE #############
3694    # NetCDF file definition
3695    outfile = NetCDFFile(sww_file, 'w')
3696
3697    #Create new file
3698    outfile.institution = 'Geoscience Australia'
3699    outfile.description = 'Converted from XXX'
3700
3701
3702    #For sww compatibility
3703    outfile.smoothing = 'Yes'
3704    outfile.order = 1
3705
3706    #Start time in seconds since the epoch (midnight 1/1/1970)
3707    outfile.starttime = starttime = times[0]
3708
3709
3710    # dimension definitions
3711    outfile.createDimension('number_of_volumes', number_of_volumes)
3712
3713    outfile.createDimension('number_of_vertices', 3)
3714    outfile.createDimension('number_of_points', number_of_points)
3715    outfile.createDimension('number_of_timesteps', number_of_times)
3716
3717    # variable definitions
3718    outfile.createVariable('x', precision, ('number_of_points',))
3719    outfile.createVariable('y', precision, ('number_of_points',))
3720    outfile.createVariable('elevation', precision, ('number_of_points',))
3721
3722    #FIXME: Backwards compatibility
3723    outfile.createVariable('z', precision, ('number_of_points',))
3724    #################################
3725
3726    outfile.createVariable('volumes', Int, ('number_of_volumes',
3727                                            'number_of_vertices'))
3728
3729    outfile.createVariable('time', precision,
3730                           ('number_of_timesteps',))
3731
3732    outfile.createVariable('stage', precision,
3733                           ('number_of_timesteps',
3734                            'number_of_points'))
3735
3736    outfile.createVariable('xmomentum', precision,
3737                           ('number_of_timesteps',
3738                            'number_of_points'))
3739
3740    outfile.createVariable('ymomentum', precision,
3741                           ('number_of_timesteps',
3742                            'number_of_points'))
3743
3744    #Store
3745    from anuga.coordinate_transforms.redfearn import redfearn
3746    x = zeros(number_of_points, Float)  #Easting
3747    y = zeros(number_of_points, Float)  #Northing
3748
3749    if verbose: print 'Making triangular grid'
3750    #Get zone of 1st point.
3751    refzone, _, _ = redfearn(latitudes[0],longitudes[0])
3752
3753    vertices = {}
3754    i = 0
3755    for k, lat in enumerate(latitudes):
3756        for l, lon in enumerate(longitudes):
3757
3758            vertices[l,k] = i
3759
3760            zone, easting, northing = redfearn(lat,lon)
3761
            msg = 'Zone boundary crossed at longitude = %s' %lon
            #assert zone == refzone, msg
3764            #print '%7.2f %7.2f %8.2f %8.2f' %(lon, lat, easting, northing)
3765            x[i] = easting
3766            y[i] = northing
3767            i += 1
3768
3769
3770    #Construct 2 triangles per 'rectangular' element
3771    volumes = []
3772    for l in range(number_of_longitudes-1):    #X direction
3773        for k in range(number_of_latitudes-1): #Y direction
3774            v1 = vertices[l,k+1]
3775            v2 = vertices[l,k]
3776            v3 = vertices[l+1,k+1]
3777            v4 = vertices[l+1,k]
3778
            #Note, this is different to the ferret2sww code
            #since the order of the lats is reversed.
3781            volumes.append([v1,v3,v2]) #Upper element
3782            volumes.append([v4,v2,v3]) #Lower element
3783
3784    volumes = array(volumes)
3785
3786    geo_ref = Geo_reference(refzone,min(x),min(y))
3787    geo_ref.write_NetCDF(outfile)
3788
3789    # This will put the geo ref in the middle
3790    #geo_ref = Geo_reference(refzone,(max(x)+min(x))/2.0,(max(x)+min(y))/2.)
3791
3792
3793    if verbose:
3794        print '------------------------------------------------'
3795        print 'More Statistics:'
        print '  Extent (x/y):'
        print '    x in [%f, %f], len(x) == %d'\
              %(min(x), max(x),
                len(x))
        print '    y in [%f, %f], len(y) == %d'\
              %(min(y), max(y),
                len(y))
3803        print 'geo_ref: ',geo_ref
3804
3805    z = resize(bath_grid,outfile.variables['z'][:].shape)
3806    outfile.variables['x'][:] = x - geo_ref.get_xllcorner()
3807    outfile.variables['y'][:] = y - geo_ref.get_yllcorner()
3808    outfile.variables['z'][:] = z
3809    outfile.variables['elevation'][:] = z  #FIXME HACK
3810    outfile.variables['volumes'][:] = volumes.astype(Int32) #On Opteron 64
3811
3812    stage = outfile.variables['stage']
3813    xmomentum = outfile.variables['xmomentum']
3814    ymomentum = outfile.variables['ymomentum']
3815
3816    outfile.variables['time'][:] = times   #Store time relative
3817
3818    if verbose: print 'Converting quantities'
3819    n = number_of_times
3820    for j in range(number_of_times):
3821        # load in files
3822        elevation_meta, elevation_grid = \
3823           _read_asc(elevation_dir + os.sep + elevation_files[j])
3824
3825        _, u_momentum_grid =  _read_asc(ucur_dir + os.sep + ucur_files[j])
3826        _, v_momentum_grid =  _read_asc(vcur_dir + os.sep + vcur_files[j])
3827
3828        #cut matrix to desired size
3829        elevation_grid = elevation_grid[kmin:kmax,lmin:lmax]
3830        u_momentum_grid = u_momentum_grid[kmin:kmax,lmin:lmax]
3831        v_momentum_grid = v_momentum_grid[kmin:kmax,lmin:lmax]
3832       
3833        # handle missing values
3834        missing = (elevation_grid == elevation_meta['NODATA_value'])
        if sometrue(missing):
3836            if fail_on_NaN:
3837                msg = 'File %s contains missing values'\
3838                      %(elevation_files[j])
3839                raise DataMissingValuesError, msg
3840            else:
3841                elevation_grid = elevation_grid*(missing==0) + \
3842                                 missing*elevation_NaN_filler
3843
3844
3845        if verbose and j%((n+10)/10)==0: print '  Doing %d of %d' %(j, n)
3846        i = 0
3847        for k in range(number_of_latitudes):      #Y direction
3848            for l in range(number_of_longitudes): #X direction
3849                w = zscale*elevation_grid[k,l] + mean_stage
3850                stage[j,i] = w
3851                h = w - z[i]
3852                xmomentum[j,i] = u_momentum_grid[k,l]*h
3853                ymomentum[j,i] = v_momentum_grid[k,l]*h
3854                i += 1
3855    outfile.close()
3856
3857def _get_min_max_indexes(latitudes_ref,longitudes_ref,
3858                        minlat=None, maxlat=None,
3859                        minlon=None, maxlon=None):
3860    """
    Return min and max indexes (for slicing) of the lat and long arrays
    to cover the area specified with min/max lat/long.

    Think of the latitudes and longitudes describing a 2d surface.
    The area returned is, if possible, just big enough to cover the
    input max/min area. (This will not be possible if the max/min area
    has a section outside of the latitudes/longitudes area.)

    Assert that longitudes are sorted from low to high (west to east,
    eg 148 - 151) and latitudes are sorted, ascending or descending.
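
    Example (a sketch): for latitudes [-36., -35., -34., -33.] and
    longitudes [148., 149., 150.], minlat=-34.5 and maxlat=-33.5 give
    lat indexes that slice out [-35., -34., -33.], i.e. a point of
    slack either side so the requested area is fully covered.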
3872    """
3873    latitudes = latitudes_ref[:]
3874    longitudes = longitudes_ref[:]
3875
3876    latitudes = ensure_numeric(latitudes)
3877    longitudes = ensure_numeric(longitudes)
3878   
3879    assert allclose(sort(longitudes), longitudes)
3880   
3881    lat_ascending = True
3882    if not allclose(sort(latitudes), latitudes):
3883        lat_ascending = False
3884        # reverse order of lat, so it's in ascending order         
3885        latitudes = latitudes[::-1]
3886        assert allclose(sort(latitudes), latitudes)
3887    #print "latitudes  in funct", latitudes
3888   
3889    largest_lat_index = len(latitudes)-1
3890    #Cut out a smaller extent.
3891    if minlat == None:
3892        lat_min_index = 0
3893    else:
3894        lat_min_index = searchsorted(latitudes, minlat)-1
3895        if lat_min_index <0:
3896            lat_min_index = 0
3897
3898
3899    if maxlat == None:
3900        lat_max_index = largest_lat_index #len(latitudes)
3901    else:
3902        lat_max_index = searchsorted(latitudes, maxlat)
3903        if lat_max_index > largest_lat_index:
3904            lat_max_index = largest_lat_index
3905
3906    if minlon == None:
3907        lon_min_index = 0
3908    else:
3909        lon_min_index = searchsorted(longitudes, minlon)-1
3910        if lon_min_index <0:
3911            lon_min_index = 0
3912
3913    if maxlon == None:
3914        lon_max_index = len(longitudes)
3915    else:
3916        lon_max_index = searchsorted(longitudes, maxlon)
3917
    # Reversing the indexes, if the lat array is descending
3919    if lat_ascending is False:
3920        lat_min_index, lat_max_index = largest_lat_index - lat_max_index , \
3921                                       largest_lat_index - lat_min_index
3922    lat_max_index = lat_max_index + 1 # taking into account how slicing works
3923    lon_max_index = lon_max_index + 1 # taking into account how slicing works
3924
3925    return lat_min_index, lat_max_index, lon_min_index, lon_max_index
3926
3927
3928def _read_asc(filename, verbose=False):
3929    """Read esri file from the following ASCII format (.asc)
3930
3931    Example:
3932
3933    ncols         3121
3934    nrows         1800
3935    xllcorner     722000
3936    yllcorner     5893000
3937    cellsize      25
3938    NODATA_value  -9999
3939    138.3698 137.4194 136.5062 135.5558 ..........
3940
3941    """
3942
3943    datafile = open(filename)
3944
3945    if verbose: print 'Reading DEM from %s' %(filename)
3946    lines = datafile.readlines()
3947    datafile.close()
3948
3949    if verbose: print 'Got', len(lines), ' lines'
3950
3951    ncols = int(lines.pop(0).split()[1].strip())
3952    nrows = int(lines.pop(0).split()[1].strip())
3953    xllcorner = float(lines.pop(0).split()[1].strip())
3954    yllcorner = float(lines.pop(0).split()[1].strip())
3955    cellsize = float(lines.pop(0).split()[1].strip())
3956    NODATA_value = float(lines.pop(0).split()[1].strip())
3957
3958    assert len(lines) == nrows
3959
3960    #Store data
3961    grid = []
3962
3963    n = len(lines)
3964    for i, line in enumerate(lines):
3965        cells = line.split()
3966        assert len(cells) == ncols
3967        grid.append(array([float(x) for x in cells]))
3968    grid = array(grid)
3969
3970    return {'xllcorner':xllcorner,
3971            'yllcorner':yllcorner,
3972            'cellsize':cellsize,
3973            'NODATA_value':NODATA_value}, grid
3974
3975
3976
3977    ####  URS 2 SWW  ###
3978
3979lon_name = 'LON'
3980lat_name = 'LAT'
3981time_name = 'TIME'
precision = Float # So if we want to change the precision it's done here
3983class Write_nc:
3984    """
3985    Write an nc file.
3986   
3987    Note, this should be checked to meet cdc netcdf conventions for gridded
3988    data. http://www.cdc.noaa.gov/cdc/conventions/cdc_netcdf_standard.shtml
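
    Example of use (a sketch; the file name, grid and slices are assumed):

        nc = Write_nc('HA', 'o_ha.nc', time_step_count=2, time_step=60.0,
                      lon=[150.0, 150.5], lat=[-34.0, -33.5])
        for ha_slice in ha_slices: # each slice shaped (len(lat), len(lon))
            nc.store_timestep(ha_slice)
        nc.close()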
3989   
3990    """
3991    def __init__(self,
3992                 quantity_name,
3993                 file_name,
3994                 time_step_count,
3995                 time_step,
3996                 lon,
3997                 lat):
3998        """
3999        time_step_count is the number of time steps.
4000        time_step is the time step size
4001       
        pre-condition: quantity_name must be 'HA', 'UA' or 'VA'.
4003        """
4004        self.quantity_name = quantity_name
4005        quantity_units = {'HA':'CENTIMETERS',
4006                              'UA':'CENTIMETERS/SECOND',
4007                              'VA':'CENTIMETERS/SECOND'}       
4008       
        multiplier_dic = {'HA':100.0,  # To convert from m to cm
                          'UA':100.0,  # m/s to cm/sec
                          'VA':-100.0} # MUX files have positive x in the
        # southern direction.  This corrects for it, when writing nc files.
4013       
4014        self.quantity_multiplier =  multiplier_dic[self.quantity_name]
4015       
4016        #self.file_name = file_name
4017        self.time_step_count = time_step_count
4018        self.time_step = time_step
4019
4020        # NetCDF file definition
4021        self.outfile = NetCDFFile(file_name, 'w')
4022        outfile = self.outfile       
4023
4024        #Create new file
4025        nc_lon_lat_header(outfile, lon, lat)
4026   
4027        # TIME
4028        outfile.createDimension(time_name, None)
4029        outfile.createVariable(time_name, precision, (time_name,))
4030
4031        #QUANTITY
4032        outfile.createVariable(self.quantity_name, precision,
4033                               (time_name, lat_name, lon_name))
4034        outfile.variables[self.quantity_name].missing_value=-1.e+034
4035        outfile.variables[self.quantity_name].units= \
4036                                 quantity_units[self.quantity_name]
4037        outfile.variables[lon_name][:]= ensure_numeric(lon)
4038        outfile.variables[lat_name][:]= ensure_numeric(lat)
4039
4040        #Assume no one will be wanting to read this, while we are writing
4041        #outfile.close()
4042       
4043    def store_timestep(self, quantity_slice):
4044        """
4045        Write a time slice of quantity info
4046        quantity_slice is the data to be stored at this time step
4047        """
4048       
4049        outfile = self.outfile
4050       
4051        # Get the variables
4052        time = outfile.variables[time_name]
4053        quantity = outfile.variables[self.quantity_name]
4054           
4055        i = len(time)
4056
4057        #Store time
4058        time[i] = i*self.time_step #self.domain.time
4059        quantity[i,:] = quantity_slice* self.quantity_multiplier
4060       
4061    def close(self):
4062        self.outfile.close()
4063
4064def urs2sww(basename_in='o', basename_out=None, verbose=False,
4065            remove_nc_files=True,
4066            minlat=None, maxlat=None,
4067            minlon= None, maxlon=None,
4068            mint=None, maxt=None,
4069            mean_stage=0,
4070            origin = None,
4071            zscale=1,
4072            fail_on_NaN=True,
4073            NaN_filler=0,
4074            elevation=None):
4075    """
4076    Convert URS C binary format for wave propagation to
4077    sww format native to abstract_2d_finite_volumes.
4078
    Specify only basename_in and read files of the form
    basefilename_waveheight-z-mux, basefilename_velocity-e-mux and
    basefilename_velocity-n-mux containing relative height,
    x-velocity and y-velocity, respectively.
4083
4084    Also convert latitude and longitude to UTM. All coordinates are
4085    assumed to be given in the GDA94 datum. The latitude and longitude
4086    information is for  a grid.
4087
    min's and max's: If omitted - full extent is used.
    To include a value min may equal it, while max must exceed it.
    Lat and lon are assumed to be in decimal degrees.
    NOTE: minlon is the most eastern boundary.

    origin is a 3-tuple with geo referenced
    UTM coordinates (zone, easting, northing)
    It will be the origin of the sww file. This shouldn't be used,
    since all of anuga should be able to handle an arbitrary origin.
4097
4098
    URS C binary format has data organised as TIME, LONGITUDE, LATITUDE
    which means that latitude is the fastest
    varying dimension (row major order, so to speak)

    In URS C binary the latitudes and longitudes are in ascending order.
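
    Example of use (a sketch; the basenames and extents are assumed):

        urs2sww(basename_in='o', basename_out='ocean',
                minlat=-34.5, maxlat=-34.0,
                minlon=150.0, maxlon=150.5,
                mean_stage=0.0, verbose=True)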
4104    """
4105    if basename_out == None:
4106        basename_out = basename_in
4107    files_out = urs2nc(basename_in, basename_out)
4108    ferret2sww(basename_out,
4109               minlat=minlat,
4110               maxlat=maxlat,
4111               minlon=minlon,
4112               maxlon=maxlon,
4113               mint=mint,
4114               maxt=maxt,
4115               mean_stage=mean_stage,
4116               origin=origin,
4117               zscale=zscale,
4118               fail_on_NaN=fail_on_NaN,
4119               NaN_filler=NaN_filler,
4120               inverted_bathymetry=True,
4121               verbose=verbose)
4122    #print "files_out",files_out
4123    if remove_nc_files:
4124        for file_out in files_out:
4125            os.remove(file_out)
4126   
4127def urs2nc(basename_in = 'o', basename_out = 'urs'):
4128    """
4129    Convert the 3 urs files to 4 nc files.
4130
    The urs file names must be:
    [basename_in]_waveheight-z-mux
    [basename_in]_velocity-e-mux
    [basename_in]_velocity-n-mux
4135   
4136    """
4137   
4138    files_in = [basename_in + WAVEHEIGHT_MUX_LABEL,
4139                basename_in + EAST_VELOCITY_LABEL,
4140                basename_in + NORTH_VELOCITY_LABEL]
4141    files_out = [basename_out+'_ha.nc',
4142                 basename_out+'_ua.nc',
4143                 basename_out+'_va.nc']
4144    quantities = ['HA','UA','VA']
4145
4146    for file_name in files_in:
4147        if os.access(file_name, os.F_OK) == 0 :
4148            msg = 'File %s does not exist or is not accessible' %file_name
4149            raise IOError, msg
4150       
4151    hashed_elevation = None
4152    for file_in, file_out, quantity in map(None, files_in,
4153                                           files_out,
4154                                           quantities):
4155        lonlatdep, lon, lat, depth = _binary_c2nc(file_in,
4156                                         file_out,
4157                                         quantity)
4158        #print "lonlatdep", lonlatdep
4159        if hashed_elevation == None:
4160            elevation_file = basename_out+'_e.nc'
4161            write_elevation_nc(elevation_file,
4162                                lon,
4163                                lat,
4164                                depth)
4165            hashed_elevation = myhash(lonlatdep)
4166        else:
4167            msg = "The elevation information in the mux files is inconsistent"
4168            assert hashed_elevation == myhash(lonlatdep), msg
4169    files_out.append(elevation_file)
4170    return files_out
4171   
4172def _binary_c2nc(file_in, file_out, quantity):
4173    """
4174    Reads in a quantity urs file and writes a quantity nc file.
4175    additionally, returns the depth and lat, long info,
4176    so it can be written to a file.
4177    """
4178    columns = 3 # long, lat , depth
4179    mux_file = open(file_in, 'rb')
4180   
4181    # Number of points/stations
4182    (points_num,)= unpack('i',mux_file.read(4))
4183
4184    # nt, int - Number of time steps
4185    (time_step_count,)= unpack('i',mux_file.read(4))
4186
4187    #dt, float - time step, seconds
4188    (time_step,) = unpack('f', mux_file.read(4))
4189   
4190    msg = "Bad data in the mux file."
4191    if points_num < 0:
4192        mux_file.close()
4193        raise ANUGAError, msg
4194    if time_step_count < 0:
4195        mux_file.close()
4196        raise ANUGAError, msg
4197    if time_step < 0:
4198        mux_file.close()
4199        raise ANUGAError, msg
4200   
4201    lonlatdep = p_array.array('f')
4202    lonlatdep.read(mux_file, columns * points_num)
4203    lonlatdep = array(lonlatdep, typecode=Float)   
4204    lonlatdep = reshape(lonlatdep, (points_num, columns))
4205   
4206    lon, lat, depth = lon_lat2grid(lonlatdep)
    lon_sorted = list(lon)
    lon_sorted.sort()

    if not alltrue(lon == lon_sorted):
        msg = "Longitudes in mux file are not in ascending order"
        raise IOError, msg
    lat_sorted = list(lat)
    lat_sorted.sort()

    if not alltrue(lat == lat_sorted):
        msg = "Latitudes in mux file are not in ascending order"
        raise IOError, msg
4218   
4219    nc_file = Write_nc(quantity,
4220                       file_out,
4221                       time_step_count,
4222                       time_step,
4223                       lon,
4224                       lat)
4225
4226    for i in range(time_step_count):
4227        #Read in a time slice  from mux file 
4228        hz_p_array = p_array.array('f')
4229        hz_p_array.read(mux_file, points_num)
4230        hz_p = array(hz_p_array, typecode=Float)
4231        hz_p = reshape(hz_p, (len(lon), len(lat)))
4232        hz_p = transpose(hz_p) #mux has lat varying fastest, nc has long v.f.
4233
4234        #write time slice to nc file
4235        nc_file.store_timestep(hz_p)
4236    mux_file.close()
4237    nc_file.close()
4238
4239    return lonlatdep, lon, lat, depth
4240   
4241
4242def write_elevation_nc(file_out, lon, lat, depth_vector):
4243    """
4244    Write an nc elevation file.
4245    """
4246   
4247    # NetCDF file definition
4248    outfile = NetCDFFile(file_out, 'w')
4249
4250    #Create new file
4251    nc_lon_lat_header(outfile, lon, lat)
4252   
4253    # ELEVATION
4254    zname = 'ELEVATION'
4255    outfile.createVariable(zname, precision, (lat_name, lon_name))
4256    outfile.variables[zname].units='CENTIMETERS'
4257    outfile.variables[zname].missing_value=-1.e+034
4258
4259    outfile.variables[lon_name][:]= ensure_numeric(lon)
4260    outfile.variables[lat_name][:]= ensure_numeric(lat)
4261
4262    depth = reshape(depth_vector, ( len(lat), len(lon)))
4263    outfile.variables[zname][:]= depth
4264   
4265    outfile.close()
4266   
4267def nc_lon_lat_header(outfile, lon, lat):
4268    """
4269    outfile is the netcdf file handle.
4270    lon - a list/array of the longitudes
4271    lat - a list/array of the latitudes
4272    """
4273   
4274    outfile.institution = 'Geoscience Australia'
4275    outfile.description = 'Converted from URS binary C'
4276   
4277    # Longitude
4278    outfile.createDimension(lon_name, len(lon))
4279    outfile.createVariable(lon_name, precision, (lon_name,))
4280    outfile.variables[lon_name].point_spacing='uneven'
4281    outfile.variables[lon_name].units='degrees_east'
4282    outfile.variables[lon_name].assignValue(lon)
4283
4284
4285    # Latitude
4286    outfile.createDimension(lat_name, len(lat))
4287    outfile.createVariable(lat_name, precision, (lat_name,))
4288    outfile.variables[lat_name].point_spacing='uneven'
4289    outfile.variables[lat_name].units='degrees_north'
4290    outfile.variables[lat_name].assignValue(lat)
4291
4292
4293   
4294def lon_lat2grid(long_lat_dep):
4295    """
    given a list of points that are assumed to be on a grid,
4297    return the long's and lat's of the grid.
4298    long_lat_dep is an array where each row is a position.
4299    The first column is longitudes.
4300    The second column is latitudes.
4301
4302    The latitude is the fastest varying dimension - in mux files
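
    Example (a sketch): for rows
    (150., -34., d0), (150., -33., d1), (151., -34., d2), (151., -33., d3)
    the function returns long == [150., 151.], lat == [-34., -33.] and
    the quantity reordered so longitude varies fastest: [d0, d2, d1, d3].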
4303    """
4304    LONG = 0
4305    LAT = 1
4306    QUANTITY = 2
4307
4308    long_lat_dep = ensure_numeric(long_lat_dep, Float)
4309   
4310    num_points = long_lat_dep.shape[0]
4311    this_rows_long = long_lat_dep[0,LONG]
4312
4313    # Count the length of unique latitudes
4314    i = 0
    while i < num_points and long_lat_dep[i,LONG] == this_rows_long:
        i += 1
    # determine the lats and longs from the grid
4318    lat = long_lat_dep[:i, LAT]       
4319    long = long_lat_dep[::i, LONG]
4320   
4321    lenlong = len(long)
4322    lenlat = len(lat)
4323    #print 'len lat', lat, len(lat)
4324    #print 'len long', long, len(long)
4325         
4326    msg = 'Input data is not gridded'     
4327    assert num_points % lenlat == 0, msg
4328    assert num_points % lenlong == 0, msg
4329         
4330    # Test that data is gridded       
4331    for i in range(lenlong):
4332        msg = 'Data is not gridded.  It must be for this operation'
4333        first = i*lenlat
4334        last = first + lenlat
4335               
4336        assert allclose(long_lat_dep[first:last,LAT], lat), msg
4337        assert allclose(long_lat_dep[first:last,LONG], long[i]), msg
4338   
4339   
4340#    print 'range long', min(long), max(long)
4341#    print 'range lat', min(lat), max(lat)
4342#    print 'ref long', min(long_lat_dep[:,0]), max(long_lat_dep[:,0])
4343#    print 'ref lat', min(long_lat_dep[:,1]), max(long_lat_dep[:,1])
4344   
4345   
4346   
4347    msg = 'Out of range latitudes/longitudes'
    for l in lat: assert -90 < l < 90, msg
    for l in long: assert -180 < l < 180, msg
4350
4351    # Changing quantity from lat being the fastest varying dimension to
4352    # long being the fastest varying dimension
4353    # FIXME - make this faster/do this a better way
4354    # use numeric transpose, after reshaping the quantity vector
4355#    quantity = zeros(len(long_lat_dep), Float)
4356    quantity = zeros(num_points, Float)
4357   
4358#    print 'num',num_points
4359    for lat_i, _ in enumerate(lat):
4360        for long_i, _ in enumerate(long):
4361            q_index = lat_i*lenlong+long_i
4362            lld_index = long_i*lenlat+lat_i
4363#            print 'lat_i', lat_i, 'long_i',long_i, 'q_index', q_index, 'lld_index', lld_index
4364            temp = long_lat_dep[lld_index, QUANTITY]
4365            quantity[q_index] = temp
4366           
4367    return long, lat, quantity
4368
4369    ####  END URS 2 SWW  ###
4370
4371    #### URS UNGRIDDED 2 SWW ###
4372
4373    ### PRODUCING THE POINTS NEEDED FILE ###
4374LL_LAT = -50.0
4375LL_LONG = 80.0
4376GRID_SPACING = 1.0/60.0
4377LAT_AMOUNT = 4800
4378LONG_AMOUNT = 3600
4379def URS_points_needed_to_file(file_name, boundary_polygon, zone,
4380                              ll_lat=LL_LAT, ll_long=LL_LONG,
4381                              grid_spacing=GRID_SPACING, 
4382                              lat_amount=LAT_AMOUNT, long_amount=LONG_AMOUNT,
4383                              export_csv=False, use_cache=False,
4384                              verbose=False):
4385    """
    file_name - name of the urs file produced for David.
    boundary_polygon - a list of points that describes a polygon.
                      The last point is assumed to join the first point.
                      This is in UTM (lat long would be better though)

    ll_lat - lower left latitude, in decimal degrees
    ll_long - lower left longitude, in decimal degrees
    grid_spacing - in decimal degrees

4394
4395
4396    Don't add the file extension.  It will be added.
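
    Example of use (a sketch; the polygon and zone are assumed):

        polygon = [[310000., 6180000.], [320000., 6180000.],
                   [320000., 6190000.], [310000., 6190000.]]
        URS_points_needed_to_file('boundary_points', polygon, zone=56,
                                  export_csv=True, verbose=True)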
4397    """
4398    geo = URS_points_needed(boundary_polygon, zone, ll_lat, ll_long,
4399                            grid_spacing, 
4400                      lat_amount, long_amount,use_cache, verbose)
4401    if not file_name[-4:] == ".urs":
4402        file_name += ".urs"
4403    geo.export_points_file(file_name)
4404    if export_csv:
4405        if file_name[-4:] == ".urs":
4406            file_name = file_name[:-4] + ".csv"
4407        geo.export_points_file(file_name)
4408
4409def URS_points_needed(boundary_polygon, zone, ll_lat=LL_LAT,
4410                      ll_long=LL_LONG, grid_spacing=GRID_SPACING, 
4411                      lat_amount=LAT_AMOUNT, long_amount=LONG_AMOUNT,
4412                      use_cache=False, verbose=False):
4413    args = (boundary_polygon,
4414                      zone)
4415    kwargs = {'ll_lat': ll_lat,
4416              'll_long': ll_long,
4417              'grid_spacing': grid_spacing,
4418              'lat_amount': lat_amount,
4419              'long_amount': long_amount} 
4420    if use_cache is True:
4421        try:
4422            from anuga.caching import cache
4423        except:
            msg = 'Caching was requested, but caching module '+\
                  'could not be imported'
            raise ImportError, msg
4427
4428
4429        geo = cache(_URS_points_needed,
4430                  args, kwargs,
4431                  verbose=verbose,
4432                  compression=False)
4433    else:
4434        #I was getting 'got multiple values for keyword argument' errors
4435        #geo = apply(_URS_points_needed, args, kwargs)
4436        geo = _URS_points_needed(boundary_polygon,
4437                      zone, ll_lat,
4438                      ll_long, grid_spacing, 
4439                      lat_amount, long_amount)
4440
4441    return geo   
4442def _URS_points_needed(boundary_polygon,
4443                      zone, ll_lat=LL_LAT,
4444                      ll_long=LL_LONG, grid_spacing=GRID_SPACING, 
4445                      lat_amount=LAT_AMOUNT, long_amount=LONG_AMOUNT):
4446    """
4447
    boundary_polygon - a list of points that describes a polygon.
                      The last point is assumed to join the first point.
                      This is in UTM (lat long would be better though)

    ll_lat - lower left latitude, in decimal degrees
    ll_long - lower left longitude, in decimal degrees
    grid_spacing - in decimal degrees
4455
4456    """
4457   
4458    from sets import ImmutableSet
4459   
4460    msg = "grid_spacing can not be zero"
    assert grid_spacing != 0, msg
4462    a = boundary_polygon
4463    # List of segments.  Each segment is two points.
4464    segs = [i and [a[i-1], a[i]] or [a[len(a)-1], a[0]] for i in range(len(a))]
4465
4466    # convert the segs to Lat's and longs.
4467   
4468    # Don't assume the zone of the segments is the same as the lower left
4469    # corner of the lat long data!!  They can easily be in different zones
4470   
4471    lat_long_set = ImmutableSet()
4472    for seg in segs:
4473        points_lat_long = points_needed(seg, ll_lat, ll_long, grid_spacing, 
4474                      lat_amount, long_amount, zone)
4475        lat_long_set |= ImmutableSet(points_lat_long)
4476    #print "lat_long_set",lat_long_set
4477    geo = Geospatial_data(data_points=list(lat_long_set),
4478                              points_are_lats_longs=True)
4479    return geo
4480   
4481def points_needed(seg, ll_lat, ll_long, grid_spacing, 
4482                  lat_amount, long_amount, zone):
4483    """
4484    return a list of the points, in lats and longs that are needed to
4485    interpolate any point on the segment.
4486    """
4487    from math import sqrt
4488    #print "zone",zone
4489    geo_reference = Geo_reference(zone=zone)
4490    #print "seg",seg
4491    geo = Geospatial_data(seg,geo_reference=geo_reference)
4492    seg_lat_long = geo.get_data_points(as_lat_long=True)
4493    #print "seg_lat_long", seg_lat_long
4494    # 1.415 = 2^0.5, rounded up....
4495    sqrt_2_rounded_up = 1.415
4496    buffer = sqrt_2_rounded_up * grid_spacing
4497   
4498    max_lat = max(seg_lat_long[0][0], seg_lat_long[1][0]) + buffer
4499    max_long = max(seg_lat_long[0][1], seg_lat_long[1][1]) + buffer
4500    min_lat = min(seg_lat_long[0][0], seg_lat_long[1][0]) - buffer
4501    min_long = min(seg_lat_long[0][1], seg_lat_long[1][1]) - buffer
4502
4503    first_row = (min_long - ll_long)/grid_spacing
4504    # To round up
4505    first_row_long = int(round(first_row + 0.5))
4506    #print "first_row", first_row_long
4507
4508    last_row = (max_long - ll_long)/grid_spacing # round down
4509    last_row_long = int(round(last_row))
4510    #print "last_row",last_row _long
4511   
4512    first_row = (min_lat - ll_lat)/grid_spacing
4513    # To round up
4514    first_row_lat = int(round(first_row + 0.5))
4515    #print "first_row", first_row_lat
4516
4517    last_row = (max_lat - ll_lat)/grid_spacing # round down
4518    last_row_lat = int(round(last_row))
4519    #print "last_row",last_row_lat
4520
    # To work out the max distance -
    # 111120 m is the horizontal distance spanned by 1 degree of latitude.
    #max_distance = sqrt_2_rounded_up * 111120 * grid_spacing
4524    max_distance = 157147.4112 * grid_spacing
4525    #print "max_distance", max_distance #2619.12 m for 1 minute
4526    points_lat_long = []
4527    # Create a list of the lat long points to include.
4528    for index_lat in range(first_row_lat, last_row_lat + 1):
4529        for index_long in range(first_row_long, last_row_long + 1):
4530            lat = ll_lat + index_lat*grid_spacing
4531            long = ll_long + index_long*grid_spacing
4532
4533            #filter here to keep good points
4534            if keep_point(lat, long, seg, max_distance):
4535                points_lat_long.append((lat, long)) #must be hashable
4536    #print "points_lat_long", points_lat_long
4537
4538    # Now that we have these points, lets throw ones out that are too far away
4539    return points_lat_long
4540
4541def keep_point(lat, long, seg, max_distance):
4542    """
4543    seg is two points, UTM
4544    """
4545    from math import sqrt
4546    _ , x0, y0 = redfearn(lat, long)
4547    x1 = seg[0][0]
4548    y1 = seg[0][1]
4549    x2 = seg[1][0]
4550    y2 = seg[1][1]
4551
4552    x2_1 = x2-x1
4553    y2_1 = y2-y1
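    # Perpendicular distance from (x0,y0) to the line through
    # (x1,y1) and (x2,y2):
    # d = |(x2-x1)(y1-y0) - (x1-x0)(y2-y1)| / sqrt((x2-x1)^2 + (y2-y1)^2)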
4554    d = abs((x2_1)*(y1-y0)-(x1-x0)*(y2_1))/sqrt((x2_1)*(x2_1)+(y2_1)*(y2_1))
    return d <= max_distance
4559   
4560    #### CONVERTING UNGRIDDED URS DATA TO AN SWW FILE ####
4561   
4562WAVEHEIGHT_MUX_LABEL = '_waveheight-z-mux'
4563EAST_VELOCITY_LABEL =  '_velocity-e-mux'
4564NORTH_VELOCITY_LABEL =  '_velocity-n-mux' 
4565def urs_ungridded2sww(basename_in='o', basename_out=None, verbose=False,
4566                      mint=None, maxt=None,
4567                      mean_stage=0,
4568                      origin=None,
4569                      hole_points_UTM=None,
4570                      zscale=1):
4571    """   
4572    Convert URS C binary format for wave propagation to
4573    sww format native to abstract_2d_finite_volumes.
4574
4575
    Specify only basename_in and read files of the form
    basefilename_waveheight-z-mux, basefilename_velocity-e-mux and
    basefilename_velocity-n-mux containing relative height,
    x-velocity and y-velocity, respectively.

    Also convert latitude and longitude to UTM. All coordinates are
    assumed to be given in the GDA94 datum. The latitude and longitude
    information is assumed to be ungridded.

    min's and max's: If omitted - full extent is used.
    To include a value, min and max may equal it.
    Lat and lon are assumed to be in decimal degrees.
4587    Lat and lon are assumed to be in decimal degrees.
4588   
    origin is a 3-tuple with geo referenced
    UTM coordinates (zone, easting, northing)
    It will be the origin of the sww file. This shouldn't be used,
    since all of anuga should be able to handle an arbitrary origin.
    The mux point info is NOT relative to this origin.


    URS C binary format has data organised as TIME, LONGITUDE, LATITUDE
    which means that latitude is the fastest
    varying dimension (row major order, so to speak)

    In URS C binary the latitudes and longitudes are in ascending order.
4601
4602    Note, interpolations of the resulting sww file will be different
4603    from results of urs2sww.  This is due to the interpolation
4604    function used, and the different grid structure between urs2sww
4605    and this function.
4606   
4607    Interpolating data that has an underlying gridded source can
4608    easily end up with different values, depending on the underlying
4609    mesh.
4610
4611    consider these 4 points
4612    50  -50
4613
4614    0     0
4615
4616    The grid can be
4617     -
4618    |\|    A
4619     -
4620     or;
4621      -
4622     |/|   B
4623      -
4624      If a point is just below the center of the midpoint, it will have a
4625      +ve value in grid A and a -ve value in grid B.
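
    Example of use (a sketch; the basename and hole point are assumed):

        urs_ungridded2sww(basename_in='o', basename_out='ocean',
                          mint=0., maxt=3600., mean_stage=0.,
                          hole_points_UTM=[305000., 6180000.])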
4626    """ 
4627    from anuga.pmesh.mesh import Mesh, NoTrianglesError
4628
4629    files_in = [basename_in + WAVEHEIGHT_MUX_LABEL,
4630                basename_in + EAST_VELOCITY_LABEL,
4631                basename_in + NORTH_VELOCITY_LABEL]
4632    quantities = ['HA','UA','VA']
4633
    # Instantiate Urs_points for each of the three mux files.
4635    mux = {}
4636    for quantity, file in map(None, quantities, files_in):
4637        mux[quantity] = Urs_points(file)
4638       
4639    # Could check that the depth is the same. (hashing)
4640
4641    # handle to a mux file to do depth stuff
4642    a_mux = mux[quantities[0]]
4643   
4644    # Convert to utm
4645    lat = a_mux.lonlatdep[:,1]
4646    long = a_mux.lonlatdep[:,0]
4647    points_utm, zone = convert_from_latlon_to_utm( \
4648        latitudes=lat, longitudes=long)
4649    #print "points_utm", points_utm
4650    #print "zone", zone
4651
    elevation = a_mux.lonlatdep[:,2] * -1 # Convert depth to elevation
4653   
4654    # grid ( create a mesh from the selected points)
    # This mesh has a problem.  Triangles are stretched over ungridded areas.
    #  If these areas could be described as holes in pmesh, that would be great

    # I can't just get the user to select a point in the middle.
4659    # A boundary is needed around these points.
4660    # But if the zone of points is obvious enough auto-segment should do
4661    # a good boundary.
4662    mesh = Mesh()
4663    mesh.add_vertices(points_utm)
4664    mesh.auto_segment(smooth_indents=True, expand_pinch=True)
4665    # To try and avoid alpha shape 'hugging' too much
4666    mesh.auto_segment( mesh.shape.get_alpha()*1.1 )
4667    if hole_points_UTM is not None:
4668        point = ensure_absolute(hole_points_UTM)
4669        mesh.add_hole(point[0], point[1])
4670    try:
4671        mesh.generate_mesh(minimum_triangle_angle=0.0, verbose=False)
4672    except NoTrianglesError:
4673        # This is a bit of a hack, going in and changing the
4674        # data structure.
4675        mesh.holes = []
4676        mesh.generate_mesh(minimum_triangle_angle=0.0, verbose=False)
4677    mesh_dic = mesh.Mesh2MeshList()
4678
4679    #mesh.export_mesh_file(basename_in + '_168.tsh')
4680    #import sys; sys.exit()
4681    # These are the times of the mux file
4682    mux_times = []
4683    for i in range(a_mux.time_step_count):
4684        mux_times.append(a_mux.time_step * i) 
4685    mux_times_start_i, mux_times_fin_i = mux2sww_time(mux_times, mint, maxt)
4686    times = mux_times[mux_times_start_i:mux_times_fin_i]
4687   
4688    if mux_times_start_i == mux_times_fin_i:
4689        # Close the mux files
4690        for quantity, file in map(None, quantities, files_in):
4691            mux[quantity].close()
4692        msg="Due to mint and maxt there's no time info in the boundary SWW."
4693        assert 1==0 , msg #Happy to know a better way of doing this..
4694       
4695    # If this raise is removed there is currently no downstream errors
4696           
4697    points_utm=ensure_numeric(points_utm)
4698    #print "mesh_dic['generatedpointlist']", mesh_dic['generatedpointlist']
4699    #print "points_utm", points_utm
4700    assert ensure_numeric(mesh_dic['generatedpointlist']) == \
4701           ensure_numeric(points_utm)
4702   
4703    volumes = mesh_dic['generatedtrianglelist']
4704   
    # Write sww intro and grid stuff.
    if basename_out is None:
        swwname = basename_in + '.sww'
    else:
        swwname = basename_out + '.sww'

    if verbose: print 'Output to ', swwname
    outfile = NetCDFFile(swwname, 'w')
    # For a different way of doing this, check out tsh2sww
    # work out sww_times and the index range this covers
    sww = Write_sww()
    sww.header(outfile, times, len(volumes), len(points_utm),
               verbose=verbose)
    outfile.mean_stage = mean_stage
    outfile.zscale = zscale

    sww.triangulation(outfile, points_utm, volumes,
                      elevation, zone, new_origin=origin,
                      verbose=verbose)

    if verbose: print 'Converting quantities'
    j = 0
    # Read in a time slice from each mux file and write it to the sww file,
    # doing the unit conversions as we go.
    for ha, ua, va in map(None, mux['HA'], mux['UA'], mux['VA']):
        if j >= mux_times_start_i and j < mux_times_fin_i:
            stage = zscale*ha + mean_stage
            h = stage - elevation
            xmomentum = ua*h
            ymomentum = -1*va*h  # -1 since in mux files south is positive.
            sww.quantities(outfile,
                           slice_index=j - mux_times_start_i,
                           verbose=verbose,
                           stage=stage,
                           xmomentum=xmomentum,
                           ymomentum=ymomentum)
        j += 1
    if verbose: sww.verbose_quantities(outfile)
    outfile.close()

def mux2sww_time(mux_times, mint, maxt):
    """Return (start, finish) indices into mux_times bounding the
    requested interval [mint, maxt].  Either bound may be None, meaning
    'from the first time' and 'to the last time' respectively.
    """

    if mint is None:
        mux_times_start_i = 0
    else:
        mux_times_start_i = searchsorted(mux_times, mint)

    if maxt is None:
        mux_times_fin_i = len(mux_times)
    else:
        maxt += 0.5  # So if you specify a time where there is
                     # data, that time will be included
        mux_times_fin_i = searchsorted(mux_times, maxt)

    return mux_times_start_i, mux_times_fin_i

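# Example (illustrative numbers only): with mux_times = [0.0, 0.5, 1.0, 1.5],
# mux2sww_time(mux_times, mint=0.4, maxt=1.0) returns (1, 3), so the slice
# mux_times[1:3] keeps the times 0.5 and 1.0.
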

class Write_sww:
    from anuga.shallow_water.shallow_water_domain import Domain
    sww_quantities = Domain.conserved_quantities
    RANGE = '_range'
    def __init__(self):
        pass

    def header(self, outfile, times, number_of_volumes,
               number_of_points,
               description='Converted from XXX',
               smoothing=True,
               order=1, verbose=False):
        """
        outfile - the open NetCDF file that will be written to
        times - A list of the time slice times OR a start time
        Note, if a list is given the info will be made relative.
        number_of_volumes - the number of triangles
        """

        outfile.institution = 'Geoscience Australia'
        outfile.description = description

        # For sww compatibility
        if smoothing is True:
            # Smoothing to be deprecated
            outfile.smoothing = 'Yes'
            outfile.vertices_are_stored_uniquely = 'False'
        else:
            # Smoothing to be deprecated
            outfile.smoothing = 'No'
            outfile.vertices_are_stored_uniquely = 'True'
        outfile.order = order

        try:
            revision_number = get_revision_number()
        except:
            revision_number = None
        # Writing a string so None can be written
        outfile.revision_number = str(revision_number)
        #times - A list or array of the time slice times OR a start time
        #Start time in seconds since the epoch (midnight 1/1/1970)

        # This separates a single start time from a list or numeric
        # array of time slice times.
        if type(times) is list or type(times) is ArrayType:
            number_of_times = len(times)
            times = ensure_numeric(times)
            if number_of_times == 0:
                starttime = 0
            else:
                starttime = times[0]
                times = times - starttime  # Store relative times
        else:
            number_of_times = 0
            starttime = times
            #times = ensure_numeric([])
        outfile.starttime = starttime
        # dimension definitions
        outfile.createDimension('number_of_volumes', number_of_volumes)
        outfile.createDimension('number_of_vertices', 3)
        outfile.createDimension('numbers_in_range', 2)

        if smoothing is True:
            outfile.createDimension('number_of_points', number_of_points)

            # FIXME(Ole): This will cause sww files for parallel domains to
            # have ghost nodes stored (but not used by triangles).
            # To clean this up, we have to change get_vertex_values and
            # friends in quantity.py (but I can't be bothered right now)
        else:
            outfile.createDimension('number_of_points', 3*number_of_volumes)
        outfile.createDimension('number_of_timesteps', number_of_times)

        # variable definitions
        outfile.createVariable('x', precision, ('number_of_points',))
        outfile.createVariable('y', precision, ('number_of_points',))
        outfile.createVariable('elevation', precision, ('number_of_points',))
        q = 'elevation'
        outfile.createVariable(q+Write_sww.RANGE, precision,
                               ('numbers_in_range',))
        # The values are initially filled with large (10e+36) numbers.
        # I'm relying on this feature.  Maybe I shouldn't?
        outfile.variables[q+Write_sww.RANGE][1] = \
                          -1*outfile.variables[q+Write_sww.RANGE][1]

        #FIXME: Backwards compatibility
        outfile.createVariable('z', precision, ('number_of_points',))
        #################################

        outfile.createVariable('volumes', Int, ('number_of_volumes',
                                                'number_of_vertices'))

        outfile.createVariable('time', precision,
                               ('number_of_timesteps',))

        for q in Write_sww.sww_quantities:
            outfile.createVariable(q, precision,
                                   ('number_of_timesteps',
                                    'number_of_points'))
            outfile.createVariable(q+Write_sww.RANGE, precision,
                                   ('numbers_in_range',))
            # Initialising the max value to a very small number.
            # It assumes that netcdf initialises it to a very large number.
            outfile.variables[q+Write_sww.RANGE][1] = \
                -outfile.variables[q+Write_sww.RANGE][1]

        if type(times) is list or type(times) is ArrayType:
            outfile.variables['time'][:] = times    # Store relative times

            if verbose:
                print '------------------------------------------------'
                print 'Statistics:'
                print '    t in [%f, %f], len(t) == %d'\
                      %(min(times.flat), max(times.flat), len(times.flat))


    def triangulation(self, outfile, points_utm, volumes,
                      elevation, zone=None, new_origin=None,
                      points_georeference=None, verbose=False):
        """
        new_origin - a georeference that the points can be set to. (Maybe
        do this before calling this function.)

        points_utm - currently a list or array of the points in UTM.
        points_georeference - the georeference of the points_utm

        How about passing new_origin and current_origin?
        If you get both, do a conversion from the old to the new.

        If you only get new_origin, the points are absolute;
        convert them to relative.

        If you only get the current_origin, the points are relative; store
        them as relative.

        If you get no georefs, create a new georef based on the minimums of
        points_utm.  (Another option would be to default to absolute.)

        Yes, and this is done in another part of the code.
        Probably geospatial.

        If you don't supply either geo_ref, then supply a zone. If not,
        the default zone will be used.


        Precondition:
        header has been called.
        """

        number_of_points = len(points_utm)
        volumes = array(volumes)

        # Given the two geo_refs and the points, do the stuff
        # described in the method header.
        # If this is needed elsewhere, pull it out as a function.
        points_georeference = ensure_geo_reference(points_georeference)
        new_origin = ensure_geo_reference(new_origin)
        if new_origin is None and points_georeference is not None:
            points = points_utm
            geo_ref = points_georeference
        else:
            if new_origin is None:
                new_origin = Geo_reference(zone, min(points_utm[:,0]),
                                           min(points_utm[:,1]))
            points = new_origin.change_points_geo_ref(points_utm,
                                                      points_georeference)
            geo_ref = new_origin

        # At this stage I need a georef and points;
        # the points are relative to the georef.
        geo_ref.write_NetCDF(outfile)

        # This would put the geo ref in the middle:
        #geo_ref = Geo_reference(refzone,(max(x)+min(x))/2.0,(max(x)+min(y))/2.)
        x = points[:,0]
        y = points[:,1]

        if verbose:
            print '------------------------------------------------'
            print 'More Statistics:'
            print '  Extent (UTM):'
            print '    x in [%f, %f], len(x) == %d'\
                  %(min(x), max(x), len(x))
            print '    y in [%f, %f], len(y) == %d'\
                  %(min(y), max(y), len(y))
            print '    z in [%f, %f], len(z) == %d'\
                  %(min(elevation), max(elevation), len(elevation))
            print 'geo_ref: ', geo_ref
            print '------------------------------------------------'

        #z = resize(bath_grid, outfile.variables['z'][:].shape)
        outfile.variables['x'][:] = points[:,0] #- geo_ref.get_xllcorner()
        outfile.variables['y'][:] = points[:,1] #- geo_ref.get_yllcorner()
        outfile.variables['z'][:] = elevation
        outfile.variables['elevation'][:] = elevation  #FIXME HACK
        outfile.variables['volumes'][:] = volumes.astype(Int32) # On Opteron 64

        q = 'elevation'
        # This updates the _range values
        outfile.variables[q+Write_sww.RANGE][0] = min(elevation)
        outfile.variables[q+Write_sww.RANGE][1] = max(elevation)

    def quantities(self, outfile, precision=Float,
                   slice_index=None, time=None,
                   verbose=False, **quant):
        """
        Write the quantity info.

        **quant is extra keyword arguments passed in. These must be
          the sww quantities, currently; stage, xmomentum, ymomentum.

        If the time array has already been built, use slice_index
        to specify the index.

        Otherwise, use time to increase the time dimension.

        Maybe make this general, but the viewer assumes these quantities,
        so maybe we don't want it general - unless the viewer is general.

        Precondition:
        triangulation and header have been called.
        """

        if time is not None:
            file_time = outfile.variables['time']
            slice_index = len(file_time)
            file_time[slice_index] = time

        # Write the conserved quantities from Domain.
        # Typically stage, xmomentum, ymomentum.
        # Other quantities will be ignored, silently.
        for q in Write_sww.sww_quantities:
            if not quant.has_key(q):
                msg = 'SWW file can not write quantity %s' %q
                raise NewQuantity, msg
            else:
                q_values = quant[q]
                outfile.variables[q][slice_index] = q_values.astype(precision)

                # This updates the _range values
                q_range = outfile.variables[q+Write_sww.RANGE][:]
                q_values_min = min(q_values)
                if q_values_min < q_range[0]:
                    outfile.variables[q+Write_sww.RANGE][0] = q_values_min
                q_values_max = max(q_values)
                if q_values_max > q_range[1]:
                    outfile.variables[q+Write_sww.RANGE][1] = q_values_max

    def verbose_quantities(self, outfile):
        print '------------------------------------------------'
        print 'More Statistics:'
        for q in Write_sww.sww_quantities:
            print '%s in [%f, %f]' %(q,
                                     outfile.variables[q+Write_sww.RANGE][0],
                                     outfile.variables[q+Write_sww.RANGE][1])
        print '------------------------------------------------'

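# A minimal usage sketch for Write_sww (illustrative only; 'fid' is an open
# NetCDF file, and points, volumes, elev, stages, xmoms and ymoms are assumed
# to come from elsewhere):
#
#     sww = Write_sww()
#     sww.header(fid, times=[0.0, 1.0], number_of_volumes=len(volumes),
#                number_of_points=len(points))
#     sww.triangulation(fid, points, volumes, elev, zone=56)
#     for i, (w, uh, vh) in enumerate(zip(stages, xmoms, ymoms)):
#         sww.quantities(fid, slice_index=i, stage=w,
#                        xmomentum=uh, ymomentum=vh)
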
def obsolete_write_sww_time_slices(outfile, has, uas, vas, elevation,
                                   mean_stage=0, zscale=1,
                                   verbose=False):
    # Time stepping
    stage = outfile.variables['stage']
    xmomentum = outfile.variables['xmomentum']
    ymomentum = outfile.variables['ymomentum']

    n = len(has)
    j = 0
    for ha, ua, va in map(None, has, uas, vas):
        if verbose and j%((n+10)/10)==0: print '  Doing %d of %d' %(j, n)
        w = zscale*ha + mean_stage
        stage[j] = w
        h = w - elevation
        xmomentum[j] = ua*h
        ymomentum[j] = -1*va*h  # -1 since in mux files south is positive.
        j += 1

def urs2txt(basename_in, location_index=None):
    """
    Not finished or tested
    """

    files_in = [basename_in + WAVEHEIGHT_MUX_LABEL,
                basename_in + EAST_VELOCITY_LABEL,
                basename_in + NORTH_VELOCITY_LABEL]
    quantities = ['HA', 'UA', 'VA']

    d = ","

    # Instantiate Urs_points for each of the three mux files.
    mux = {}
    for quantity, file in map(None, quantities, files_in):
        mux[quantity] = Urs_points(file)

    # Could check that the depth is the same. (hashing)

    # Handle to one mux file, used for the depth/location info
    # shared by all three.
    a_mux = mux[quantities[0]]

    # Convert to UTM
    latitudes = a_mux.lonlatdep[:,1]
    longitudes = a_mux.lonlatdep[:,0]
    points_utm, zone = convert_from_latlon_to_utm( \
        latitudes=latitudes, longitudes=longitudes)
    #print "points_utm", points_utm
    #print "zone", zone
    depths = a_mux.lonlatdep[:,2]

    fid = open(basename_in + '.txt', 'w')

    fid.write("zone: " + str(zone) + "\n")

    if location_index is not None:
        # Title
        li = location_index
        fid.write('location_index'+d+'lat'+d+ 'long' +d+ 'Easting' +d+ \
                  'Northing' + "\n")
        fid.write(str(li) +d+ str(latitudes[li])+d+ \
              str(longitudes[li]) +d+ str(points_utm[li][0]) +d+ \
              str(points_utm[li][1]) + "\n")

    # The non-time-dependent stuff
    # Title
    fid.write('location_index'+d+'lat'+d+ 'long' +d+ 'Easting' +d+ \
                  'Northing' +d+ 'depth m' + "\n")
    i = 0
    for depth, point_utm, lat, lon in map(None, depths,
                                          points_utm, latitudes,
                                          longitudes):

        fid.write(str(i) +d+ str(lat)+d+ str(lon) +d+ str(point_utm[0]) +d+ \
                  str(point_utm[1]) +d+ str(depth) + "\n")
        i += 1
    # Time-dependent
    if location_index is not None:
        time_step = a_mux.time_step
        i = 0
        # Title
        fid.write('time' +d+ 'HA depth m'+d+ \
                  'UA momentum East x m/sec' +d+ 'VA momentum North y m/sec' \
                  + "\n")
        for HA, UA, VA in map(None, mux['HA'], mux['UA'], mux['VA']):
            fid.write(str(i*time_step) +d+ str(HA[location_index])+d+ \
                      str(UA[location_index]) +d+ str(VA[location_index]) \
                      + "\n")

            i += 1

class Urs_points:
    """
    Read the info in URS mux files.

    For the quantities, here is the correlation between the file names
    and what they contain:
    z-mux is height above sea level, m
    e-mux is velocity in the Easterly direction, m/s
    n-mux is velocity in the Northerly direction, m/s
    """
    def __init__(self, urs_file):
        self.iterated = False
        columns = 3  # long, lat, depth
        mux_file = open(urs_file, 'rb')

        # Number of points/stations
        (self.points_num,) = unpack('i', mux_file.read(4))

        # nt, int - Number of time steps
        (self.time_step_count,) = unpack('i', mux_file.read(4))
        #print "self.time_step_count", self.time_step_count
        # dt, float - time step, seconds
        (self.time_step,) = unpack('f', mux_file.read(4))
        #print "self.time_step", self.time_step
        msg = "Bad data in the urs file."
        if self.points_num < 0:
            mux_file.close()
            raise ANUGAError, msg
        if self.time_step_count < 0:
            mux_file.close()
            raise ANUGAError, msg
        if self.time_step < 0:
            mux_file.close()
            raise ANUGAError, msg

        # The depth is in metres; it is the distance from the ocean
        # surface to the sea bottom.
        lonlatdep = p_array.array('f')
        lonlatdep.read(mux_file, columns * self.points_num)
        lonlatdep = array(lonlatdep, typecode=Float)
        lonlatdep = reshape(lonlatdep, (self.points_num, columns))
        #print 'lonlatdep', lonlatdep
        self.lonlatdep = lonlatdep

        self.mux_file = mux_file
        # check this array

    def __iter__(self):
        """
        Iterate over the quantity data, one time slice at a time.

        Note: You can only iterate once over an object.

        Returns quantity information for each time slice.
        """
        msg = "You can only iterate once over a urs file."
        assert not self.iterated, msg
        self.iter_time_step = 0
        self.iterated = True
        return self

    def next(self):
        if self.time_step_count == self.iter_time_step:
            self.close()
            raise StopIteration
        # Read in a time slice from the mux file
        hz_p_array = p_array.array('f')
        hz_p_array.read(self.mux_file, self.points_num)
        hz_p = array(hz_p_array, typecode=Float)
        self.iter_time_step += 1

        return hz_p

    def close(self):
        self.mux_file.close()

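# A minimal usage sketch (assuming 'foo-z-mux' is an existing z-mux file):
#
#     mux = Urs_points('foo-z-mux')
#     print mux.points_num, mux.time_step_count, mux.time_step
#     for hz in mux:      # one Float array of length points_num per time step
#         print hz[0]     # quantity value at the first station
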
#### END URS UNGRIDDED 2 SWW ###


def start_screen_catcher(dir_name, myid='', numprocs='', extra_info='',
                         print_to_screen=False, verbose=False):
    """
    Used to store screen output and errors to file. If run on multiple
    processes, each processor will have its own output and error file.

    extra_info - a string that can be used to distinguish outputs,
    eg. '_other'
    """
    import sys
    if access(dir_name, W_OK) == 0:
        if verbose: print 'Make directory %s' %dir_name
        if verbose: print "myid", myid
        mkdir(dir_name, 0777)
    if myid != '':
        myid = '_' + str(myid)
    if numprocs != '':
        numprocs = '_' + str(numprocs)
    if extra_info != '':
        extra_info = '_' + str(extra_info)
    screen_output_name = dir_name + "screen_output%s%s%s.txt" \
                         %(myid, numprocs, extra_info)
    screen_error_name = dir_name + "screen_error%s%s%s.txt" \
                        %(myid, numprocs, extra_info)
    print screen_output_name
    # Used to catch screen output and redirect it to file
    sys.stdout = Screen_Catcher(screen_output_name)
    sys.stderr = Screen_Catcher(screen_error_name)

class Screen_Catcher:
    """This simply catches the screen output and stores it to the file
    defined by start_screen_catcher (above).
    """

    def __init__(self, filename):
        self.filename = filename

        if exists(self.filename):
            print 'Old existing file "%s" has been deleted' %self.filename
            remove(self.filename)

    def write(self, stuff):
        fid = open(self.filename, 'a')
        fid.write(stuff)
        fid.close()
#        if print_to_screen: print stuff

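# A minimal usage sketch (illustrative paths only):
#
#     start_screen_catcher('output/run1/', myid=0, numprocs=4)
#     print 'hello'    # now appended to output/run1/screen_output_0_4.txt
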
def copy_code_files(dir_name, filename1, filename2):
    """Copies "filename1" and "filename2" to "dir_name". Very useful for
    information management.
    filename1 and filename2 are both absolute pathnames.
    """

    if access(dir_name, F_OK) == 0:
        print 'Make directory %s' %dir_name
        mkdir(dir_name, 0777)
    shutil.copy(filename1, dir_name + sep + basename(filename1))
    shutil.copy(filename2, dir_name + sep + basename(filename2))
#    copy (__file__, project.output_run_time_dir + basename(__file__))
    print 'Files %s and %s copied' %(filename1, filename2)

def get_data_from_file(filename, separator_value=','):
    """
    Read in data information from file.

    Returns:
        header_fields, a list of the fields in the first line, split by
        the 'separator_value' (the last field keeps its trailing newline)

        data, an array (number of data lines x number of fields) of the
        file contents excluding the header

    NOTE: won't deal with columns of different lengths, and there must be
    no blank lines at the end.
    """

    fid = open(filename)
    lines = fid.readlines()

    fid.close()

    header_line = lines[0]
    header_fields = header_line.split(separator_value)

    # Array to store the data; created as Float so values parse as floats
    data = array([], typecode=Float)
    data = resize(data, ((len(lines)-1), len(header_fields)))
#    print 'number of fields', range(len(header_fields))
#    print 'number of lines', len(lines), shape(data)
#    print 'data', data[1,1], header_line

    array_number = 0
    line_number = 1
    while line_number < len(lines):
        # Data starts on the line below the header
        fields = lines[line_number].split(separator_value)
        for i in range(len(header_fields)):
            data[array_number, i] = float(fields[i])

        line_number = line_number + 1
        array_number = array_number + 1

    return header_fields, data

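# Example (illustrative): for a file 'gauges.csv' whose first line is
# "time,stage" followed by rows of numbers,
#
#     header_fields, data = get_data_from_file('gauges.csv')
#
# returns the header fields (the last one keeping its trailing newline)
# and a Float array with one row per data line and one column per field.
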
def store_parameters(verbose=False, **kwargs):
    """
    Store the keyword arguments "kwargs" in a csv file. If "completed" is
    among the kwargs, the file is kwargs['file_name'], otherwise it is
    kwargs['output_dir'] + 'detail_temp.csv'; the corresponding keyword
    argument must be present in each case.

    There might be a better way to do this using the csv module's Writer.
    """
    import types

    # Check that kwargs is a dictionary
    if type(kwargs) != types.DictType:
        raise TypeError

    # Is 'completed' in kwargs?
    completed = kwargs.has_key('completed')

    # Get the file name and assert that it exists
    if completed:
        try:
            file = str(kwargs['file_name'])
        except:
            raise Exception, 'kwargs must have file_name'
    else:
        # Write temp file in output directory
        try:
            file = str(kwargs['output_dir']) + 'detail_temp.csv'
        except:
            raise Exception, 'kwargs must have output_dir'

    # Extract the header info and the new line info
    line = ''
    header = ''
    count = 0
    keys = kwargs.keys()
    keys.sort()

    # Use the sorted keys to create the header and line data
    for k in keys:
#        print "%s = %s" %(k, kwargs[k])
        header = header + str(k)
        line = line + str(kwargs[k])
        count += 1
        if count < len(kwargs):
            header = header + ','
            line = line + ','
    header += '\n'
    line += '\n'

    # Check the header info: if the same then append,
    # if not then create a new file
    try:
        fid = open(file, "r")
        file_header = fid.readline()
        fid.close()
        if verbose: print 'read file header %s' %file_header

    except:
        msg = 'Try to create new file %s' %file
        if verbose: print msg
        # Try to open the file; maybe the directory is bad
        try:
            fid = open(file, "w")
            fid.write(header)
            fid.close()
            file_header = header
        except:
            msg = 'Cannot create new file %s' %file
            raise Exception, msg

    # If the header is the same, or this is a new file, append the line
    if file_header == str(header):
        fid = open(file, "a")
        fid.write(line)
        fid.close()
    else:
        # Backup plan: if the header is different, append both header and
        # line to the end of detail_temp.csv in the output directory
        file = str(kwargs['output_dir']) + 'detail_temp.csv'
        fid = open(file, "a")
        fid.write(header)
        fid.write(line)
        fid.close()
        if verbose: print 'file', file_header.strip('\n')
        if verbose: print 'head', header.strip('\n')
        if file_header.strip('\n') == str(header): print 'they equal'
        msg = 'WARNING: File header does not match input info; the input '\
              'variables have changed. Suggest changing the file name.'
        print msg

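# A minimal usage sketch (illustrative values only):
#
#     store_parameters(output_dir='output/run1/', alpha=0.1, friction=0.01)
#
# appends a line to output/run1/detail_temp.csv, writing a header first if
# the file is new; passing completed=... plus file_name=... writes there
# instead.
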


def get_maximum_inundation_elevation(filename,
                                     polygon=None,
                                     time_interval=None,
                                     verbose=False):

    """Return highest elevation where depth > 0

    Usage:
    max_runup = get_maximum_inundation_elevation(filename,
                                                 polygon=None,
                                                 time_interval=None,
                                                 verbose=False)

    filename is a NetCDF sww file containing ANUGA model output.
    Optional arguments polygon and time_interval restrict the maximum
    runup calculation to points that lie within the specified polygon
    and time interval.

    If no inundation is found within polygon and time_interval, the return
    value is None, signifying "No Runup" or "Everything is dry".

    See the general function get_maximum_inundation_data for details.
    """

    runup, _ = get_maximum_inundation_data(filename,
                                           polygon=polygon,
                                           time_interval=time_interval,
                                           verbose=verbose)
    return runup

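# Example (illustrative): find the highest wet elevation during the first
# hour of a model run, restricted to a hypothetical study polygon:
#
#     polygon = [[300000, 6180000], [305000, 6180000],
#                [305000, 6185000], [300000, 6185000]]
#     runup = get_maximum_inundation_elevation('model_output.sww',
#                                              polygon=polygon,
#                                              time_interval=[0, 3600])
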
def get_maximum_inundation_location(filename,
                                    polygon=None,
                                    time_interval=None,
                                    verbose=False):
    """Return location of highest elevation where h > 0

    Usage:
    max_runup_location = get_maximum_inundation_location(filename,
                                                         polygon=None,
                                                         time_interval=None,
                                                         verbose=False)

    filename is a NetCDF sww file containing ANUGA model output.
    Optional arguments polygon and time_interval restrict the maximum
    runup calculation to points that lie within the specified polygon
    and time interval.

    If no inundation is found within polygon and time_interval, the return
    value is None, signifying "No Runup" or "Everything is dry".

    See the general function get_maximum_inundation_data for details.
    """

    _, max_loc = get_maximum_inundation_data(filename,
                                             polygon=polygon,
                                             time_interval=time_interval,
                                             verbose=verbose)
    return max_loc


def get_maximum_inundation_data(filename, polygon=None, time_interval=None,
                                use_centroid_values=False,
                                verbose=False):
    """Compute maximum run up height from sww file.


    Usage:
    runup, location = get_maximum_inundation_data(filename,
                                                  polygon=None,
                                                  time_interval=None,
                                                  verbose=False)


    Algorithm is as in get_maximum_inundation_elevation from
    shallow_water_domain, except that this function works with the sww file
    and computes the maximal runup height over multiple timesteps.

    Optional arguments polygon and time_interval restrict the maximum runup
    calculation to points that lie within the specified polygon and time
    interval. The polygon is assumed to be in (absolute) UTM coordinates in
    the same zone as the domain.

    If no inundation is found within polygon and time_interval, the return
    value is None, signifying "No Runup" or "Everything is dry".
    """

    # We are using nodal values here as that is what is stored in sww files.

    # Water depth below which it is considered to be 0 in the model
    # FIXME (Ole): Allow this to be specified as a keyword argument as well

    from anuga.utilities.polygon import inside_polygon
    from anuga.config import minimum_allowed_height
    from Scientific.IO.NetCDF import NetCDFFile

    dir, base = os.path.split(filename)

    iterate_over = get_all_swwfiles(dir, base)

    if verbose:
        print 'Reading from %s' %filename
        # FIXME: Use general swwstats (when done)

    maximal_runup = None
    maximal_runup_location = None

    for swwfile in iterate_over:

        # Read sww file
        filename = join(dir, swwfile+'.sww')

        if verbose:
            print 'Reading from %s' %filename
            # FIXME: Use general swwstats (when done)

        fid = NetCDFFile(filename)

        # Get geo_reference
        # sww files don't have to have a geo_ref
        try:
            geo_reference = Geo_reference(NetCDFObject=fid)
        except AttributeError, e:
            geo_reference = Geo_reference()  # Default georef object

        xllcorner = geo_reference.get_xllcorner()
        yllcorner = geo_reference.get_yllcorner()
        zone = geo_reference.get_zone()

        # Get extent
        volumes = fid.variables['volumes'][:]
        x = fid.variables['x'][:] + xllcorner
        y = fid.variables['y'][:] + yllcorner

        # Get the relevant quantities
        elevation = fid.variables['elevation'][:]
        stage = fid.variables['stage'][:]

        # Here's where one could convert nodal information to centroid
        # information, but that is probably something we need to write in C.
        # Here's a Python thought which is NOT finished!!!
        if use_centroid_values is True:
            x = get_centroid_values(x, volumes)
            y = get_centroid_values(y, volumes)
            elevation = get_centroid_values(elevation, volumes)

        # Spatial restriction
        if polygon is not None:
            msg = 'polygon must be a sequence of points.'
            assert len(polygon[0]) == 2, msg
            # FIXME (Ole): Make a generic polygon input check in polygon.py
            # and call it here

            points = concatenate((x[:,NewAxis], y[:,NewAxis]), axis=1)

            point_indices = inside_polygon(points, polygon)

            # Restrict quantities to polygon
            elevation = take(elevation, point_indices)
            stage = take(stage, point_indices, axis=1)

            # Get info for location of maximal runup
            points_in_polygon = take(points, point_indices)
            x = points_in_polygon[:,0]
            y = points_in_polygon[:,1]
        else:
            # Take all points
            point_indices = arange(len(x))

        # Temporal restriction
        time = fid.variables['time'][:]
        all_timeindices = arange(len(time))
        if time_interval is not None:

            msg = 'time_interval must be a sequence of length 2.'
            assert len(time_interval) == 2, msg
            msg = 'time_interval %s must not be decreasing.' %(time_interval)
            assert time_interval[1] >= time_interval[0], msg

            msg = 'Specified time interval [%.8f:%.8f]' %tuple(time_interval)
            msg += ' does not match model time interval: [%.8f, %.8f]\n'\
                   %(time[0], time[-1])
            if time_interval[1] < time[0]: raise ValueError(msg)
            if time_interval[0] > time[-1]: raise ValueError(msg)

            # Take time indices corresponding to interval (& is bitwise AND)
            timesteps = compress((time_interval[0] <= time) &
                                 (time <= time_interval[1]),
                                 all_timeindices)

            msg = 'time_interval %s did not include any model timesteps.'\
                  %(time_interval)
            assert len(timesteps) > 0, msg

        else:
            # Take them all
            timesteps = all_timeindices

        fid.close()

        # Compute maximal runup for each timestep
        for i in timesteps:
            if use_centroid_values is True:
                stage_i = get_centroid_values(stage[i,:], volumes)
            else:
                stage_i = stage[i,:]

            depth = stage_i - elevation

            # Get wet nodes, i.e. nodes with depth > 0, within the given
            # region and timesteps
            wet_nodes = compress(depth > minimum_allowed_height,
                                 arange(len(depth)))

            if len(wet_nodes) == 0:
                runup = None
            else:
                # Find maximum elevation among wet nodes
                wet_elevation = take(elevation, wet_nodes)

                runup_index = argmax(wet_elevation)
                runup = max(wet_elevation)
                assert wet_elevation[runup_index] == runup  # Always True
            if runup > maximal_runup:
                maximal_runup = runup  # Works even if maximal_runup is None
                #print "NEW RUNUP", runup

                # Record location
                wet_x = take(x, wet_nodes)
                wet_y = take(y, wet_nodes)
                maximal_runup_location = [wet_x[runup_index],
                                          wet_y[runup_index]]

    #print 'maximal_runup, maximal_runup_location', \
    #      maximal_runup, maximal_runup_location
    return maximal_runup, maximal_runup_location

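# Example (illustrative): get both the maximal runup and where it occurred:
#
#     runup, location = get_maximum_inundation_data('model_output.sww')
#     if runup is None:
#         print 'Everything stayed dry'
#     else:
#         print 'Runup of %f m at easting %f, northing %f'\
#               %(runup, location[0], location[1])
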
def get_all_swwfiles(look_in_dir='', base_name='', verbose=False):
    '''
    Finds all the sww files in "look_in_dir" whose names contain "base_name".
    Will accept base_name with or without the extension ".sww".

    Returns: a list of strings

    Usage:     iterate_over = get_all_swwfiles(dir, name)
    then
               for swwfile in iterate_over:
                   do stuff

    Check "export_grids" and "get_maximum_inundation_data" for examples
    '''

    # Test the extension
    name, extension = os.path.splitext(base_name)

    if extension != '' and extension != '.sww':
        msg = 'file %s%s must be a NetCDF sww file!' %(name, extension)
        raise IOError, msg

    if look_in_dir == "":
        look_in_dir = "."  # Unix compatibility

    dir_ls = os.listdir(look_in_dir)
    #print 'dir_ls', dir_ls, base
    iterate_over = [x[:-4] for x in dir_ls if name in x and x[-4:] == '.sww']
    if len(iterate_over) == 0:
        msg = 'No files of the base name %s' %(name)
        raise IOError, msg
    if verbose: print 'iterate over %s' %(iterate_over)

    return iterate_over

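# Example (illustrative): with output/ containing run1_P0.sww and
# run1_P1.sww,
#
#     for swwfile in get_all_swwfiles('output', 'run1'):
#         print swwfile
#
# prints the base names 'run1_P0' and 'run1_P1' without the extension
# (order not guaranteed, since os.listdir order is arbitrary).
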


#-------------------------------------------------------------
if __name__ == "__main__":
    # Setting umask from config to force permissions for all files and
    # directories created to be the same. (It was noticed that "mpirun"
    # doesn't honour the umask set in your .bashrc etc. file.)
    from config import umask
    import os
    os.umask(umask)