1 | """datamanager.py - input output for AnuGA |
---|
2 | |
---|
3 | |
---|
4 | This module takes care of reading and writing datafiles such as topograhies, |
---|
5 | model output, etc |
---|
6 | |
---|
7 | |
---|
8 | Formats used within AnuGA: |
---|
9 | |
---|
10 | .sww: Netcdf format for storing model output f(t,x,y) |
---|
11 | .tms: Netcdf format for storing time series f(t) |
---|
12 | |
---|
13 | .csv: ASCII format for storing arbitrary points and associated attributes |
---|
14 | .pts: NetCDF format for storing arbitrary points and associated attributes |
---|
15 | |
---|
16 | .asc: ASCII format of regular DEMs as output from ArcView |
---|
17 | .prj: Associated ArcView file giving more meta data for asc format |
---|
18 | .ers: ERMapper header format of regular DEMs for ArcView |
---|
19 | |
---|
20 | .dem: NetCDF representation of regular DEM data |
---|
21 | |
---|
22 | .tsh: ASCII format for storing meshes and associated boundary and region info |
---|
23 | .msh: NetCDF format for storing meshes and associated boundary and region info |
---|
24 | |
---|
25 | .nc: Native ferret NetCDF format |
---|
26 | .geo: Houdinis ascii geometry format (?) |
---|
27 | |
---|
28 | |
---|
29 | A typical dataflow can be described as follows |
---|
30 | |
---|
31 | Manually created files: |
---|
32 | ASC, PRJ: Digital elevation models (gridded) |
---|
33 | TSH: Triangular meshes (e.g. created from anuga.pmesh) |
---|
34 | NC Model outputs for use as boundary conditions (e.g from MOST) |
---|
35 | |
---|
36 | |
---|
37 | AUTOMATICALLY CREATED FILES: |
---|
38 | |
---|
39 | ASC, PRJ -> DEM -> PTS: Conversion of DEM's to native pts file |
---|
40 | |
---|
41 | NC -> SWW: Conversion of MOST bundary files to boundary sww |
---|
42 | |
---|
43 | PTS + TSH -> TSH with elevation: Least squares fit |
---|
44 | |
---|
45 | TSH -> SWW: Conversion of TSH to sww viewable using Swollen |
---|
46 | |
---|
47 | TSH + Boundary SWW -> SWW: Simluation using abstract_2d_finite_volumes |
---|
48 | |
---|
49 | """ |
---|

import exceptions
class TitleValueError(exceptions.Exception): pass
class DataMissingValuesError(exceptions.Exception): pass
class DataFileNotOpenError(exceptions.Exception): pass
class DataTimeError(exceptions.Exception): pass
class DataDomainError(exceptions.Exception): pass
class NewQuantity(exceptions.Exception): pass



import csv
import os, sys
import shutil
from struct import unpack
import array as p_array
#import time, os
from os import sep, path, remove, mkdir, access, F_OK, W_OK, getcwd


from Numeric import concatenate, array, Float, Int, Int32, resize, \
     sometrue, searchsorted, zeros, allclose, around, reshape, \
     transpose, sort, NewAxis, ArrayType, compress, take, arange, \
     argmax, alltrue, shape, Float32, size

import string

from Scientific.IO.NetCDF import NetCDFFile
#from shutil import copy
from os.path import exists, basename, join
from os import getcwd


from anuga.coordinate_transforms.redfearn import redfearn, \
     convert_from_latlon_to_utm
from anuga.coordinate_transforms.geo_reference import Geo_reference, \
     write_NetCDF_georeference, ensure_geo_reference
from anuga.geospatial_data.geospatial_data import Geospatial_data,\
     ensure_absolute
from anuga.config import minimum_storable_height as default_minimum_storable_height
from anuga.config import max_float
from anuga.utilities.numerical_tools import ensure_numeric, mean
from anuga.caching.caching import myhash
from anuga.utilities.anuga_exceptions import ANUGAError
from anuga.shallow_water import Domain
from anuga.abstract_2d_finite_volumes.pmesh2domain import \
     pmesh_to_domain_instance
from anuga.abstract_2d_finite_volumes.util import get_revision_number, \
     remove_lone_verts, sww2timeseries, get_centroid_values
from anuga.load_mesh.loadASCII import export_mesh_file
from anuga.utilities.polygon import intersection


# formula mappings

quantity_formula = {'momentum':'(xmomentum**2 + ymomentum**2)**0.5',
                    'depth':'stage-elevation',
                    'speed': \
 '(xmomentum**2 + ymomentum**2)**0.5/(stage-elevation+1.e-6/(stage-elevation))'}
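

# Illustrative sketch: the formula strings above can be evaluated with eval()
# against arrays of quantity values; the data below are hypothetical.
def _example_evaluate_quantity_formula():
    from Numeric import array
    stage = array([1.0, 2.0])
    elevation = array([0.5, 0.5])
    xmomentum = array([3.0, 4.0])
    ymomentum = array([4.0, 3.0])

    depth = eval(quantity_formula['depth'])        # stage - elevation
    momentum = eval(quantity_formula['momentum'])  # absolute momentum
    return depth, momentum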



def make_filename(s):
    """Transform argument string into a suitable filename
    """

    s = s.strip()
    s = s.replace(' ', '_')
    s = s.replace('(', '')
    s = s.replace(')', '')
    s = s.replace('__', '_')

    return s
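

# Illustrative usage sketch for make_filename (the input string is hypothetical).
def _example_make_filename():
    name = make_filename(' Model output (stage) ')
    assert name == 'Model_output_stage'
    return name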


def check_dir(path, verbose=None):
    """Check that specified path exists.
    If path does not exist it will be created if possible

    USAGE:
       check_dir(path, verbose)

    ARGUMENTS:
        path -- Directory
        verbose -- Flag verbose output (default: None)

    RETURN VALUE:
        Verified path including trailing separator

    """

    import os.path

    if sys.platform in ['nt', 'dos', 'win32', 'what else?']:
        unix = 0
    else:
        unix = 1


    if path[-1] != os.sep:
        path = path + os.sep  # Add separator for directories

    path = os.path.expanduser(path)  # Expand ~ or ~user in pathname
    if not (os.access(path, os.R_OK | os.W_OK) or path == ''):
        try:
            exitcode = os.mkdir(path)

            # Change access rights if possible
            #
            if unix:
                exitcode = os.system('chmod 775 ' + path)
            else:
                pass  # FIXME: What about access rights under Windows?

            if verbose: print 'MESSAGE: Directory', path, 'created.'

        except:
            print 'WARNING: Directory', path, 'could not be created.'
            if unix:
                path = '/tmp/'
            else:
                path = 'C:'

            print 'Using directory %s instead' %path

    return(path)
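

# Illustrative usage sketch for check_dir (the directory name is hypothetical).
def _example_check_dir():
    datadir = check_dir('~/anuga_output', verbose=True)
    # datadir is now an existing, writable path ending with os.sep
    return datadir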



def del_dir(path):
    """Recursively delete directory path and all its contents
    """

    import os

    if os.path.isdir(path):
        for file in os.listdir(path):
            X = os.path.join(path, file)


            if os.path.isdir(X) and not os.path.islink(X):
                del_dir(X)
            else:
                try:
                    os.remove(X)
                except:
                    print "Could not remove file %s" %X

        os.rmdir(path)


# ANOTHER OPTION, IF NEEDED IN THE FUTURE, Nick B 7/2007
def rmgeneric(path, __func__, verbose=False):
    ERROR_STR = """Error removing %(path)s, %(error)s """

    try:
        __func__(path)
        if verbose: print 'Removed ', path
    except OSError, (errno, strerror):
        print ERROR_STR % {'path' : path, 'error': strerror }

def removeall(path, verbose=False):

    if not os.path.isdir(path):
        return

    files = os.listdir(path)

    for x in files:
        fullpath = os.path.join(path, x)
        if os.path.isfile(fullpath):
            f = os.remove
            rmgeneric(fullpath, f)
        elif os.path.isdir(fullpath):
            removeall(fullpath, verbose)
            f = os.rmdir
            rmgeneric(fullpath, f, verbose)



def create_filename(datadir, filename, format, size=None, time=None):

    import os
    #from anuga.config import data_dir

    FN = check_dir(datadir) + filename

    if size is not None:
        FN += '_size%d' %size

    if time is not None:
        FN += '_time%.2f' %time

    FN += '.' + format
    return FN


def get_files(datadir, filename, format, size):
    """Get all file (names) with given name, size and format
    """

    import glob

    import os
    #from anuga.config import data_dir

    dir = check_dir(datadir)  # Includes trailing separator

    # Match the naming used by create_filename
    pattern = dir + filename + '_size%d*.%s' %(size, format)
    return glob.glob(pattern)



#Generic class for storing output to e.g. visualisation or checkpointing
class Data_format:
    """Generic interface to data formats
    """


    def __init__(self, domain, extension, mode = 'w'):
        assert mode in ['r', 'w', 'a'], '''Mode %s must be either:''' %mode +\
                                        ''' 'w' (write)'''+\
                                        ''' 'r' (read)''' +\
                                        ''' 'a' (append)'''

        #Create filename
        self.filename = create_filename(domain.get_datadir(),
                                        domain.get_name(), extension)

        #print 'F', self.filename
        self.timestep = 0
        self.domain = domain



        # Exclude ghosts in case this is a parallel domain
        self.number_of_nodes = domain.number_of_full_nodes
        self.number_of_volumes = domain.number_of_full_triangles
        #self.number_of_volumes = len(domain)




#FIXME: Should we have a general set_precision function?


#Class for storing output to e.g. visualisation
class Data_format_sww(Data_format):
    """Interface to native NetCDF format (.sww) for storing model output

    There are two kinds of data

    1: Constant data: Vertex coordinates and field values. Stored once
    2: Variable data: Conserved quantities. Stored once per timestep.

    All data is assumed to reside at vertex locations.
    """


    def __init__(self, domain, mode = 'w',\
                 max_size = 2000000000,
                 recursion = False):
        from Scientific.IO.NetCDF import NetCDFFile
        from Numeric import Int, Float, Float32

        self.precision = Float32  #Use single precision for quantities
        if hasattr(domain, 'max_size'):
            self.max_size = domain.max_size  #file size max is 2Gig
        else:
            self.max_size = max_size
        self.recursion = recursion
        self.mode = mode

        Data_format.__init__(self, domain, 'sww', mode)

        if hasattr(domain, 'minimum_storable_height'):
            self.minimum_storable_height = domain.minimum_storable_height
        else:
            self.minimum_storable_height = default_minimum_storable_height

        # NetCDF file definition
        fid = NetCDFFile(self.filename, mode)

        if mode == 'w':
            description = 'Output from anuga.abstract_2d_finite_volumes suitable for plotting'
            self.writer = Write_sww()
            self.writer.store_header(fid,
                                     domain.starttime,
                                     self.number_of_volumes,
                                     self.domain.number_of_full_nodes,
                                     description=description,
                                     smoothing=domain.smooth,
                                     order=domain.default_order,
                                     sww_precision=self.precision)

            # Extra optional information
            if hasattr(domain, 'texture'):
                fid.texture = domain.texture

            if domain.quantities_to_be_monitored is not None:
                fid.createDimension('singleton', 1)
                fid.createDimension('two', 2)

                poly = domain.monitor_polygon
                if poly is not None:
                    N = len(poly)
                    fid.createDimension('polygon_length', N)
                    fid.createVariable('extrema.polygon',
                                       self.precision,
                                       ('polygon_length',
                                        'two'))
                    fid.variables['extrema.polygon'][:] = poly


                interval = domain.monitor_time_interval
                if interval is not None:
                    fid.createVariable('extrema.time_interval',
                                       self.precision,
                                       ('two',))
                    fid.variables['extrema.time_interval'][:] = interval


                for q in domain.quantities_to_be_monitored:
                    #print 'doing', q
                    fid.createVariable(q+'.extrema', self.precision,
                                       ('numbers_in_range',))
                    fid.createVariable(q+'.min_location', self.precision,
                                       ('numbers_in_range',))
                    fid.createVariable(q+'.max_location', self.precision,
                                       ('numbers_in_range',))
                    fid.createVariable(q+'.min_time', self.precision,
                                       ('singleton',))
                    fid.createVariable(q+'.max_time', self.precision,
                                       ('singleton',))


        fid.close()

    def store_connectivity(self):
        """Specialisation of store_connectivity for NetCDF format

        Writes x,y,z coordinates of triangles constituting
        the bed elevation.
        """

        from Scientific.IO.NetCDF import NetCDFFile

        from Numeric import concatenate, Int

        domain = self.domain

        #Get NetCDF
        fid = NetCDFFile(self.filename, 'a')  #Open existing file for append

        # Get the variables
        x = fid.variables['x']
        y = fid.variables['y']
        z = fid.variables['elevation']

        volumes = fid.variables['volumes']

        # Get X, Y and bed elevation Z
        Q = domain.quantities['elevation']
        X,Y,Z,V = Q.get_vertex_values(xy=True,
                                      precision=self.precision)

        #
        points = concatenate( (X[:,NewAxis],Y[:,NewAxis]), axis=1 )
        self.writer.store_triangulation(fid,
                                        points,
                                        V.astype(volumes.typecode()),
                                        Z,
                                        points_georeference= \
                                        domain.geo_reference)

        # Close
        fid.close()

    def store_timestep(self, names=None):
        """Store time and named quantities to file
        """

        from Scientific.IO.NetCDF import NetCDFFile
        import types
        from time import sleep
        from os import stat

        from Numeric import choose


        if names is None:
            # Standard shallow water wave equation quantities in ANUGA
            names = ['stage', 'xmomentum', 'ymomentum']

        # Get NetCDF
        retries = 0
        file_open = False
        while not file_open and retries < 10:
            try:
                fid = NetCDFFile(self.filename, 'a')  # Open existing file
            except IOError:
                # This could happen if someone was reading the file.
                # In that case, wait a while and try again
                msg = 'Warning (store_timestep): File %s could not be opened'\
                      %self.filename
                msg += ' - trying step %s again' %self.domain.time
                print msg
                retries += 1
                sleep(1)
            else:
                file_open = True

        if not file_open:
            msg = 'File %s could not be opened for append' %self.filename
            raise DataFileNotOpenError, msg



        # Check to see if the file is already too big:
        time = fid.variables['time']
        i = len(time)+1
        file_size = stat(self.filename)[6]
        file_size_increase = file_size/i
        if file_size + file_size_increase > self.max_size*(2**self.recursion):
            # In order to get the file name and start time correct,
            # I change the domain.filename and domain.starttime.
            # This is the only way to do this without changing
            # other modules (I think).

            # Write a filename addon that won't break Swollen's reader
            # (10.sww is bad)
            filename_ext = '_time_%s'%self.domain.time
            filename_ext = filename_ext.replace('.', '_')

            # Remember the old filename, then give domain a
            # name with the extension
            old_domain_filename = self.domain.get_name()
            if not self.recursion:
                self.domain.set_name(old_domain_filename+filename_ext)


            # Change the domain starttime to the current time
            old_domain_starttime = self.domain.starttime
            self.domain.starttime = self.domain.time

            # Build a new data_structure.
            next_data_structure=\
                Data_format_sww(self.domain, mode=self.mode,\
                                max_size = self.max_size,\
                                recursion = self.recursion+1)
            if not self.recursion:
                print '    file_size = %s'%file_size
                print '    saving file to %s'%next_data_structure.filename
            #set up the new data_structure
            self.domain.writer = next_data_structure

            #FIXME - could be cleaner to use domain.store_timestep etc.
            next_data_structure.store_connectivity()
            next_data_structure.store_timestep(names)
            fid.sync()
            fid.close()

            #restore the old starttime and filename
            self.domain.starttime = old_domain_starttime
            self.domain.set_name(old_domain_filename)
        else:
            self.recursion = False
            domain = self.domain

            # Get the variables
            time = fid.variables['time']
            stage = fid.variables['stage']
            xmomentum = fid.variables['xmomentum']
            ymomentum = fid.variables['ymomentum']
            i = len(time)
            if type(names) not in [types.ListType, types.TupleType]:
                names = [names]

            if 'stage' in names and 'xmomentum' in names and \
               'ymomentum' in names:

                # Get stage, elevation, depth and select only those
                # values where minimum_storable_height is exceeded
                Q = domain.quantities['stage']
                A, _ = Q.get_vertex_values(xy = False,
                                           precision = self.precision)
                z = fid.variables['elevation']

                storable_indices = A-z[:] >= self.minimum_storable_height
                stage = choose(storable_indices, (z[:], A))

                # Define a zero vector of same size and type as A
                # for use with momenta
                null = zeros(size(A), A.typecode())

                # Get xmomentum where depth exceeds minimum_storable_height
                Q = domain.quantities['xmomentum']
                xmom, _ = Q.get_vertex_values(xy = False,
                                              precision = self.precision)
                xmomentum = choose(storable_indices, (null, xmom))


                # Get ymomentum where depth exceeds minimum_storable_height
                Q = domain.quantities['ymomentum']
                ymom, _ = Q.get_vertex_values(xy = False,
                                              precision = self.precision)
                ymomentum = choose(storable_indices, (null, ymom))

                # Write quantities to NetCDF
                self.writer.store_quantities(fid,
                                             time=self.domain.time,
                                             sww_precision=self.precision,
                                             stage=stage,
                                             xmomentum=xmomentum,
                                             ymomentum=ymomentum)
            else:
                msg = 'Quantities stored must be: stage, xmomentum, ymomentum.'
                msg += ' Instead I got: ' + str(names)
                raise Exception, msg



            # Update extrema if requested
            domain = self.domain
            if domain.quantities_to_be_monitored is not None:
                for q, info in domain.quantities_to_be_monitored.items():

                    if info['min'] is not None:
                        fid.variables[q + '.extrema'][0] = info['min']
                        fid.variables[q + '.min_location'][:] =\
                                        info['min_location']
                        fid.variables[q + '.min_time'][0] = info['min_time']

                    if info['max'] is not None:
                        fid.variables[q + '.extrema'][1] = info['max']
                        fid.variables[q + '.max_location'][:] =\
                                        info['max_location']
                        fid.variables[q + '.max_time'][0] = info['max_time']



            # Flush and close
            fid.sync()
            fid.close()



# Class for handling checkpoints data
class Data_format_cpt(Data_format):
    """Interface to native NetCDF format (.cpt)
    """


    def __init__(self, domain, mode = 'w'):
        from Scientific.IO.NetCDF import NetCDFFile
        from Numeric import Int, Float

        self.precision = Float  #Use full precision

        Data_format.__init__(self, domain, 'sww', mode)


        # NetCDF file definition
        fid = NetCDFFile(self.filename, mode)

        if mode == 'w':
            #Create new file
            fid.institution = 'Geoscience Australia'
            fid.description = 'Checkpoint data'
            #fid.smooth = domain.smooth
            fid.order = domain.default_order

            # dimension definitions
            fid.createDimension('number_of_volumes', self.number_of_volumes)
            fid.createDimension('number_of_vertices', 3)

            #Store info at all vertices (no smoothing)
            fid.createDimension('number_of_points', 3*self.number_of_volumes)
            fid.createDimension('number_of_timesteps', None)  #extensible

            # variable definitions

            #Mesh
            fid.createVariable('x', self.precision, ('number_of_points',))
            fid.createVariable('y', self.precision, ('number_of_points',))


            fid.createVariable('volumes', Int, ('number_of_volumes',
                                                'number_of_vertices'))

            fid.createVariable('time', self.precision,
                               ('number_of_timesteps',))

            #Allocate space for all quantities
            for name in domain.quantities.keys():
                fid.createVariable(name, self.precision,
                                   ('number_of_timesteps',
                                    'number_of_points'))

        #Close
        fid.close()

    def store_checkpoint(self):
        """Write x,y coordinates and connectivity of the triangles
        constituting the mesh.
        """

        from Scientific.IO.NetCDF import NetCDFFile

        from Numeric import concatenate

        domain = self.domain

        #Get NetCDF
        fid = NetCDFFile(self.filename, 'a')  #Open existing file for append

        # Get the variables
        x = fid.variables['x']
        y = fid.variables['y']

        volumes = fid.variables['volumes']

        # Get X, Y and bed elevation Z
        Q = domain.quantities['elevation']
        X,Y,Z,V = Q.get_vertex_values(xy=True,
                                      precision = self.precision)



        x[:] = X.astype(self.precision)
        y[:] = Y.astype(self.precision)
        # Note: elevation Z is not stored; the .cpt file definition above
        # does not allocate a variable for it.

        volumes[:] = V

        #Close
        fid.close()

    def store_timestep(self, name):
        """Store time and named quantity to file
        """
        from Scientific.IO.NetCDF import NetCDFFile
        from time import sleep

        #Get NetCDF
        retries = 0
        file_open = False
        while not file_open and retries < 10:
            try:
                fid = NetCDFFile(self.filename, 'a')  #Open existing file
            except IOError:
                #This could happen if someone was reading the file.
                #In that case, wait a while and try again
                msg = 'Warning (store_timestep): File %s could not be opened'\
                      %self.filename
                msg += ' - trying again'
                print msg
                retries += 1
                sleep(1)
            else:
                file_open = True

        if not file_open:
            msg = 'File %s could not be opened for append' %self.filename
            raise DataFileNotOpenError, msg


        domain = self.domain

        # Get the variables
        time = fid.variables['time']
        stage = fid.variables['stage']
        i = len(time)

        #Store stage
        time[i] = self.domain.time

        # Get quantity
        Q = domain.quantities[name]
        A,V = Q.get_vertex_values(xy=False,
                                  precision = self.precision)

        stage[i,:] = A.astype(self.precision)

        #Flush and close
        fid.sync()
        fid.close()

#### NED is national exposure database (name changed to NEXIS)

LAT_TITLE = 'LATITUDE'
LONG_TITLE = 'LONGITUDE'
X_TITLE = 'x'
Y_TITLE = 'y'
class Exposure_csv:
    def __init__(self, file_name, latitude_title=LAT_TITLE,
                 longitude_title=LONG_TITLE, is_x_y_locations=None,
                 x_title=X_TITLE, y_title=Y_TITLE,
                 refine_polygon=None, title_check_list=None):
        """
        This class is for handling the exposure csv file.
        It reads the file in and converts the lats and longs to a geospatial
        data object.
        Use the methods to read and write columns.

        The format of the csv files it reads is:
        The first row is a title row.
        Commas are the delimiters.
        Each column is a 'set' of data.

        Feel free to use/expand it to read other csv files.


        It is not for adding and deleting rows.

        Can geospatial handle string attributes? It's not made for them.
        Currently it can't load and save string att's.

        So just use geospatial to hold the x, y and georef? Bad, since
        different att's are in different structures. Not so bad, the info
        to write if the .csv file is saved is in attribute_dic.

        The location info is in the geospatial attribute.


        """
        self._file_name = file_name
        self._geospatial = None #

        # self._attribute_dic is a dictionary.
        # The keys are the column titles.
        # The values are lists of column data

        # self._title_index_dic is a dictionary.
        # The keys are the column titles.
        # The values are the index positions of file columns.
        self._attribute_dic, self._title_index_dic = \
            csv2dict(self._file_name, title_check_list=title_check_list)
        try:
            #Have code here that handles caps or lower
            lats = self._attribute_dic[latitude_title]
            longs = self._attribute_dic[longitude_title]

        except KeyError:
            # maybe a warning..
            #Let's see if this works..
            if is_x_y_locations is not False:
                is_x_y_locations = True
                pass
        else:
            self._geospatial = Geospatial_data(latitudes = lats,
                                               longitudes = longs)

        if is_x_y_locations is True:
            if self._geospatial is not None:
                pass  #fixme throw an error
            try:
                xs = self._attribute_dic[x_title]
                ys = self._attribute_dic[y_title]
                points = [[float(i),float(j)] for i,j in map(None,xs,ys)]
            except KeyError:
                # maybe a warning..
                msg = "Could not find location information."
                raise TitleValueError, msg
            else:
                self._geospatial = Geospatial_data(data_points=points)

    # create a list of points that are in the refining_polygon
    # described by a list of indexes representing the points

    def __cmp__(self, other):
        #print "self._attribute_dic",self._attribute_dic
        #print "other._attribute_dic",other._attribute_dic
        #print "self._title_index_dic", self._title_index_dic
        #print "other._title_index_dic", other._title_index_dic

        #check that a is an instance of this class
        if isinstance(self, type(other)):
            result = cmp(self._attribute_dic, other._attribute_dic)
            if result != 0:
                return result
            # The order of the columns is important. Therefore..
            result = cmp(self._title_index_dic, other._title_index_dic)
            if result != 0:
                return result
            for self_ls, other_ls in map(None, self._attribute_dic, \
                                         other._attribute_dic):
                result = cmp(self._attribute_dic[self_ls],
                             other._attribute_dic[other_ls])
                if result != 0:
                    return result
            return 0
        else:
            return 1


    def get_column(self, column_name, use_refind_polygon=False):
        """
        Given a column name return a list of the column values.

        Note, the type of the values will be String!
        Do this to change a list of strings to a list of floats:
            time = [float(x) for x in time]

        Not implemented:
        if use_refind_polygon is True, only return values in the
        refined polygon
        """
        if not self._attribute_dic.has_key(column_name):
            msg = 'There is no column called %s!' %column_name
            raise TitleValueError, msg
        return self._attribute_dic[column_name]

    def get_value(self, value_column_name,
                  known_column_name,
                  known_values,
                  use_refind_polygon=False):
        """
        Do linear interpolation on the known_column, using the known_values,
        to return a value of the value_column_name.
        """
        pass

    def get_location(self, use_refind_polygon=False):
        """
        Return a geospatial object which describes the
        locations of the location file.

        Note, if there is no location info, this returns None.

        Not implemented:
        if use_refind_polygon is True, only return values in the
        refined polygon
        """
        return self._geospatial

    def set_column(self, column_name, column_values, overwrite=False):
        """
        Add a column to the 'end' (with the right most column being the end)
        of the csv file.

        Set overwrite to True if you want to overwrite a column.

        Note, in column_name white space is removed and case is not checked.
        Precondition:
        The column_name and column_values cannot have commas in them.
        """

        value_row_count = \
            len(self._attribute_dic[self._title_index_dic.keys()[0]])
        if len(column_values) != value_row_count:
            msg = 'The number of column values must equal the number of rows.'
            raise DataMissingValuesError, msg

        if self._attribute_dic.has_key(column_name):
            if not overwrite:
                msg = 'Column name %s already in use!' %column_name
                raise TitleValueError, msg
        else:
            # New title. Add it to the title index.
            self._title_index_dic[column_name] = len(self._title_index_dic)
        self._attribute_dic[column_name] = column_values
        #print "self._title_index_dic[column_name]",self._title_index_dic

    def save(self, file_name=None):
        """
        Save the exposure csv file
        """
        if file_name is None:
            file_name = self._file_name

        fd = open(file_name, 'wb')
        writer = csv.writer(fd)

        #Write the title to a csv file
        line = [None] * len(self._title_index_dic)
        for title in self._title_index_dic.iterkeys():
            line[self._title_index_dic[title]] = title
        writer.writerow(line)

        # Write the values to a csv file
        value_row_count = \
            len(self._attribute_dic[self._title_index_dic.keys()[0]])
        for row_i in range(value_row_count):
            line = [None] * len(self._title_index_dic)
            for title in self._title_index_dic.iterkeys():
                line[self._title_index_dic[title]] = \
                     self._attribute_dic[title][row_i]
            writer.writerow(line)
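

# Illustrative usage sketch for Exposure_csv; 'exposure.csv' and the column
# names below are hypothetical.
def _example_exposure_csv():
    exposure = Exposure_csv('exposure.csv')
    geo = exposure.get_location()          # Geospatial_data object or None
    walls = exposure.get_column('WALLS')   # column values come back as strings
    exposure.set_column('MAX_DEPTH', ['0.0'] * len(walls))
    exposure.save(file_name='exposure_with_depth.csv')
    return geo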


def csv2dict(file_name, title_check_list=None):
    """
    Load in the csv as a dict, title as key and column info as value.
    Also, create a dict, title as key and column index as value,
    to keep track of the column order.

    Two dictionaries are returned.

    WARNING: Values are returned as strings.
    Do this to change a list of strings to a list of floats:
        time = [float(x) for x in time]


    """

    #
    attribute_dic = {}
    title_index_dic = {}
    titles_stripped = []  # list of titles
    reader = csv.reader(file(file_name))

    # Read in and manipulate the title info
    titles = reader.next()
    for i, title in enumerate(titles):
        titles_stripped.append(title.strip())
        title_index_dic[title.strip()] = i
    title_count = len(titles_stripped)
    #print "title_index_dic",title_index_dic
    if title_check_list is not None:
        for title_check in title_check_list:
            #msg = "Reading error. This title is not present ", title_check
            #assert title_index_dic.has_key(title_check), msg
            if not title_index_dic.has_key(title_check):
                #reader.close()
                msg = "Reading error. This title is not present ", \
                      title_check
                raise IOError, msg



    #create a dict of column values, indexed by column title
    for line in reader:
        if len(line) != title_count:
            raise IOError  #FIXME make this nicer
        for i, value in enumerate(line):
            attribute_dic.setdefault(titles_stripped[i], []).append(value)

    return attribute_dic, title_index_dic
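

# Illustrative usage sketch for csv2dict; 'exposure.csv' and its column
# titles are hypothetical.
def _example_csv2dict():
    attributes, title_index = csv2dict('exposure.csv',
                                       title_check_list=['LATITUDE'])
    # Values come back as strings; convert a column explicitly:
    lats = [float(x) for x in attributes['LATITUDE']]
    return lats, title_index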


#Auxiliary
def write_obj(filename, x, y, z):
    """Store x,y,z vectors into filename (obj format)
    Vectors are assumed to have dimension (M,3) where
    M corresponds to the number of elements.
    Triangles are assumed to be disconnected.

    The three numbers in each vector correspond to three vertices,

    e.g. the x coordinate of vertex 1 of element i is in x[i,1]

    """
    #print 'Writing obj to %s' % filename

    import os.path

    root, ext = os.path.splitext(filename)
    if ext == '.obj':
        FN = filename
    else:
        FN = filename + '.obj'


    outfile = open(FN, 'wb')
    outfile.write("# Triangulation as an obj file\n")

    M, N = x.shape
    assert N == 3  #Assuming three vertices per element

    for i in range(M):
        for j in range(N):
            outfile.write("v %f %f %f\n" % (x[i,j], y[i,j], z[i,j]))

    for i in range(M):
        base = i*N
        outfile.write("f %d %d %d\n" % (base+1, base+2, base+3))

    outfile.close()
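

# Illustrative usage sketch for write_obj with two disconnected triangles
# (hypothetical data and file name).
def _example_write_obj():
    from Numeric import array, Float
    x = array([[0.0, 1.0, 0.0], [1.0, 1.0, 0.0]], Float)
    y = array([[0.0, 0.0, 1.0], [0.0, 1.0, 1.0]], Float)
    z = array([[0.0, 0.0, 0.0], [0.5, 0.5, 0.5]], Float)
    write_obj('two_triangles', x, y, z)  # writes two_triangles.obj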


#########################################################
#Conversion routines
########################################################

def sww2obj(basefilename, size):
    """Convert netcdf based data output to obj
    """
    from Scientific.IO.NetCDF import NetCDFFile

    from Numeric import Float, zeros

    #Get NetCDF
    FN = create_filename('.', basefilename, 'sww', size)
    print 'Reading from ', FN
    fid = NetCDFFile(FN, 'r')  #Open existing file for read


    # Get the variables
    x = fid.variables['x']
    y = fid.variables['y']
    z = fid.variables['elevation']
    time = fid.variables['time']
    stage = fid.variables['stage']

    M = size  #Number of lines
    xx = zeros((M,3), Float)
    yy = zeros((M,3), Float)
    zz = zeros((M,3), Float)

    for i in range(M):
        for j in range(3):
            xx[i,j] = x[i+j*M]
            yy[i,j] = y[i+j*M]
            zz[i,j] = z[i+j*M]

    #Write obj for bathymetry
    FN = create_filename('.', basefilename, 'obj', size)
    write_obj(FN, xx, yy, zz)


    #Now read all the data with variable information, combine with
    #x,y info and store as obj

    for k in range(len(time)):
        t = time[k]
        print 'Processing timestep %f' %t

        for i in range(M):
            for j in range(3):
                zz[i,j] = stage[k,i+j*M]


        #Write obj for variable data
        #FN = create_filename(basefilename, 'obj', size, time=t)
        FN = create_filename('.', basefilename[:5], 'obj', size, time=t)
        write_obj(FN, xx, yy, zz)

def dat2obj(basefilename):
    """Convert line based data output to obj
    FIXME: Obsolete?
    """

    import glob, os
    from anuga.config import data_dir


    #Get bathymetry and x,y's
    lines = open(data_dir+os.sep+basefilename+'_geometry.dat', 'r').readlines()

    from Numeric import zeros, Float

    M = len(lines)  #Number of lines
    x = zeros((M,3), Float)
    y = zeros((M,3), Float)
    z = zeros((M,3), Float)

    for i, line in enumerate(lines):
        tokens = line.split()
        values = map(float, tokens)

        for j in range(3):
            x[i,j] = values[j*3]
            y[i,j] = values[j*3+1]
            z[i,j] = values[j*3+2]


    #Write obj for bathymetry
    write_obj(data_dir+os.sep+basefilename+'_geometry', x, y, z)


    #Now read all the data files with variable information, combine with
    #x,y info
    #and store as obj

    files = glob.glob(data_dir+os.sep+basefilename+'*.dat')

    for filename in files:
        print 'Processing %s' % filename

        # glob already returns paths prefixed with data_dir
        lines = open(filename, 'r').readlines()
        assert len(lines) == M
        root, ext = os.path.splitext(filename)

        #Get time from filename
        i0 = filename.find('_time=')
        if i0 == -1:
            #Skip bathymetry file
            continue

        i0 += 6  #Position where time starts
        i1 = filename.find('.dat')

        if i1 > i0:
            t = float(filename[i0:i1])
        else:
            raise DataTimeError, 'Hmmmm'



        for i, line in enumerate(lines):
            tokens = line.split()
            values = map(float, tokens)

            for j in range(3):
                z[i,j] = values[j]

        #Write obj for variable data
        write_obj(data_dir+os.sep+basefilename+'_time=%.4f' %t, x, y, z)


def filter_netcdf(filename1, filename2, first=0, last=None, step=1):
    """Read netcdf filename1, pick timesteps first:step:last and save to
    netcdf file filename2
    """
    from Scientific.IO.NetCDF import NetCDFFile

    #Get NetCDF
    infile = NetCDFFile(filename1, 'r')   #Open existing file for read
    outfile = NetCDFFile(filename2, 'w')  #Open new file


    #Copy dimensions
    for d in infile.dimensions:
        outfile.createDimension(d, infile.dimensions[d])

    for name in infile.variables:
        var = infile.variables[name]
        outfile.createVariable(name, var.typecode(), var.dimensions)


    #Copy the static variables
    for name in infile.variables:
        if name == 'time' or name == 'stage':
            pass
        else:
            #Copy
            outfile.variables[name][:] = infile.variables[name][:]

    #Copy selected timesteps
    time = infile.variables['time']
    stage = infile.variables['stage']

    newtime = outfile.variables['time']
    newstage = outfile.variables['stage']

    if last is None:
        last = len(time)

    selection = range(first, last, step)
    for i, j in enumerate(selection):
        print 'Copying timestep %d of %d (%f)' %(j, last-first, time[j])
        newtime[i] = time[j]
        newstage[i,:] = stage[j,:]

    #Close
    infile.close()
    outfile.close()
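

# Illustrative usage sketch for filter_netcdf: keep every 5th timestep of a
# (hypothetical) sww file.
def _example_filter_netcdf():
    filter_netcdf('run.sww', 'run_thinned.sww', first=0, step=5)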


#Get data objects
def get_dataobject(domain, mode='w'):
    """Return instance of class of given format using filename
    """

    cls = eval('Data_format_%s' %domain.format)
    return cls(domain, mode)
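

# Illustrative sketch: get_dataobject resolves the class by name, so a domain
# with format 'sww' yields a Data_format_sww writer (the domain is hypothetical).
def _example_get_dataobject(domain):
    writer = get_dataobject(domain, mode='w')
    writer.store_connectivity()
    writer.store_timestep(['stage', 'xmomentum', 'ymomentum'])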




def dem2pts(basename_in, basename_out=None,
            easting_min=None, easting_max=None,
            northing_min=None, northing_max=None,
            use_cache=False, verbose=False,):
    """Read Digital Elevation model from the following NetCDF format (.dem)

    Example:

    ncols         3121
    nrows         1800
    xllcorner     722000
    yllcorner     5893000
    cellsize      25
    NODATA_value  -9999
    138.3698 137.4194 136.5062 135.5558 ..........

    Convert to NetCDF pts format which is

    points:  (Nx2) Float array
    elevation: N Float array
    """



    kwargs = {'basename_out': basename_out,
              'easting_min': easting_min,
              'easting_max': easting_max,
              'northing_min': northing_min,
              'northing_max': northing_max,
              'verbose': verbose}

    if use_cache is True:
        from caching import cache
        result = cache(_dem2pts, basename_in, kwargs,
                       dependencies = [basename_in + '.dem'],
                       verbose = verbose)

    else:
        result = apply(_dem2pts, [basename_in], kwargs)

    return result
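

# Illustrative usage sketch for dem2pts; 'topography' and the clipping
# bounds below are hypothetical.
def _example_dem2pts():
    dem2pts('topography',                  # reads topography.dem
            basename_out='topography_clipped',
            easting_min=722000, easting_max=725000,
            northing_min=5893000, northing_max=5895000,
            verbose=True)                  # writes topography_clipped.pts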


def _dem2pts(basename_in, basename_out=None, verbose=False,
             easting_min=None, easting_max=None,
             northing_min=None, northing_max=None):
    """Read Digital Elevation model from the following NetCDF format (.dem)

    Internal function. See public function dem2pts for details.
    """

    # FIXME: Can this be written feasibly using write_pts?

    import os
    from Scientific.IO.NetCDF import NetCDFFile
    from Numeric import Float, zeros, reshape, sum

    root = basename_in

    # Get NetCDF
    infile = NetCDFFile(root + '.dem', 'r')  # Open existing netcdf file for read

    if verbose: print 'Reading DEM from %s' %(root + '.dem')

    ncols = infile.ncols[0]
    nrows = infile.nrows[0]
    xllcorner = infile.xllcorner[0]  # Easting of lower left corner
    yllcorner = infile.yllcorner[0]  # Northing of lower left corner
    cellsize = infile.cellsize[0]
    NODATA_value = infile.NODATA_value[0]
    dem_elevation = infile.variables['elevation']

    zone = infile.zone[0]
    false_easting = infile.false_easting[0]
    false_northing = infile.false_northing[0]

    # Text strings
    projection = infile.projection
    datum = infile.datum
    units = infile.units


    # Get output file
    if basename_out is None:
        ptsname = root + '.pts'
    else:
        ptsname = basename_out + '.pts'

    if verbose: print 'Store to NetCDF file %s' %ptsname
    # NetCDF file definition
    outfile = NetCDFFile(ptsname, 'w')

    # Create new file
    outfile.institution = 'Geoscience Australia'
    outfile.description = 'NetCDF pts format for compact and portable storage ' +\
                          'of spatial point data'
    # Assign default values
    if easting_min is None: easting_min = xllcorner
    if easting_max is None: easting_max = xllcorner + ncols*cellsize
    if northing_min is None: northing_min = yllcorner
    if northing_max is None: northing_max = yllcorner + nrows*cellsize

    # Compute offsets to update georeferencing
    easting_offset = xllcorner - easting_min
    northing_offset = yllcorner - northing_min

    # Georeferencing
    outfile.zone = zone
    outfile.xllcorner = easting_min   # Easting of lower left corner
    outfile.yllcorner = northing_min  # Northing of lower left corner
    outfile.false_easting = false_easting
    outfile.false_northing = false_northing

    outfile.projection = projection
    outfile.datum = datum
    outfile.units = units


    # Grid info (FIXME: probably not going to be used, but heck)
    outfile.ncols = ncols
    outfile.nrows = nrows

    dem_elevation_r = reshape(dem_elevation, (nrows, ncols))
    totalnopoints = nrows*ncols

    # Calculating number of NODATA_values for each row in clipped region
    # FIXME: use array operations to do faster
    nn = 0
    k = 0
    i1_0 = 0
    j1_0 = 0
    thisj = 0
    thisi = 0
    for i in range(nrows):
        y = (nrows-i-1)*cellsize + yllcorner
        for j in range(ncols):
            x = j*cellsize + xllcorner
            if easting_min <= x <= easting_max and \
               northing_min <= y <= northing_max:
                thisj = j
                thisi = i
                if dem_elevation_r[i,j] == NODATA_value: nn += 1

                if k == 0:
                    i1_0 = i
                    j1_0 = j
                k += 1

    index1 = j1_0
    index2 = thisj

    # Dimension definitions
    nrows_in_bounding_box = int(round((northing_max-northing_min)/cellsize))
    ncols_in_bounding_box = int(round((easting_max-easting_min)/cellsize))

    clippednopoints = (thisi+1-i1_0)*(thisj+1-j1_0)
    nopoints = clippednopoints-nn

    clipped_dem_elev = dem_elevation_r[i1_0:thisi+1,j1_0:thisj+1]

    if verbose:
        print 'There are %d values in the elevation' %totalnopoints
        print 'There are %d values in the clipped elevation' %clippednopoints
        print 'There are %d NODATA_values in the clipped elevation' %nn

    outfile.createDimension('number_of_points', nopoints)
    outfile.createDimension('number_of_dimensions', 2)  #This is 2d data

    # Variable definitions
    outfile.createVariable('points', Float, ('number_of_points',
                                             'number_of_dimensions'))
    outfile.createVariable('elevation', Float, ('number_of_points',))

    # Get handles to the variables
    points = outfile.variables['points']
    elevation = outfile.variables['elevation']

    lenv = index2-index1+1
    # Store data
    global_index = 0
    # for i in range(nrows):
    for i in range(i1_0, thisi+1, 1):
        if verbose and i%((nrows+10)/10)==0:
            print 'Processing row %d of %d' %(i, nrows)

        lower_index = global_index

        v = dem_elevation_r[i,index1:index2+1]
        no_NODATA = sum(v == NODATA_value)
        if no_NODATA > 0:
            newcols = lenv - no_NODATA  # ncols_in_bounding_box - no_NODATA
        else:
            newcols = lenv  # ncols_in_bounding_box

        telev = zeros(newcols, Float)
        tpoints = zeros((newcols, 2), Float)

        local_index = 0

        y = (nrows-i-1)*cellsize + yllcorner
        #for j in range(ncols):
        for j in range(j1_0, index2+1, 1):

            x = j*cellsize + xllcorner
            if easting_min <= x <= easting_max and \
               northing_min <= y <= northing_max and \
               dem_elevation_r[i,j] != NODATA_value:
                tpoints[local_index, :] = [x-easting_min, y-northing_min]
                telev[local_index] = dem_elevation_r[i, j]
                global_index += 1
                local_index += 1

        upper_index = global_index

        if upper_index == lower_index + newcols:
            points[lower_index:upper_index, :] = tpoints
            elevation[lower_index:upper_index] = telev

    assert global_index == nopoints, 'index not equal to number of points'

    infile.close()
    outfile.close()


def _read_hecras_cross_sections(lines):
    """Return block of surface lines for each cross section
    Starts with SURFACE LINE,
    Ends with END CROSS-SECTION
    """

    points = []

    reading_surface = False
    for i, line in enumerate(lines):

        if len(line.strip()) == 0:  #Ignore blanks
            continue

        if lines[i].strip().startswith('SURFACE LINE'):
            reading_surface = True
            continue

        if lines[i].strip().startswith('END') and reading_surface:
            yield points
            reading_surface = False
            points = []

        if reading_surface:
            fields = line.strip().split(',')
            easting = float(fields[0])
            northing = float(fields[1])
            elevation = float(fields[2])
            points.append([easting, northing, elevation])

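
# Illustrative usage sketch for _read_hecras_cross_sections on a minimal,
# hypothetical block of .sdf lines.
def _example_read_hecras_cross_sections():
    lines = ['SURFACE LINE:',
             '   405548.67, 6438142.76, 35.37',
             '   405552.24, 6438146.28, 35.41',
             'END CROSS-SECTION']
    for section in _read_hecras_cross_sections(lines):
        print 'Cross section with %d points' % len(section)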
---|
1494 | |
---|
1495 | |
---|

def hecras_cross_sections2pts(basename_in,
                              basename_out=None,
                              verbose=False):
    """Read HEC-RAS elevation data from the following ASCII format (.sdf)

    Example:

    # RAS export file created on Mon 15Aug2005 11:42
    # by HEC-RAS Version 3.1.1

    BEGIN HEADER:
    UNITS: METRIC
    DTM TYPE: TIN
    DTM: v:\1\cit\perth_topo\river_tin
    STREAM LAYER: c:\local\hecras\21_02_03\up_canning_cent3d.shp
    CROSS-SECTION LAYER: c:\local\hecras\21_02_03\up_can_xs3d.shp
    MAP PROJECTION: UTM
    PROJECTION ZONE: 50
    DATUM: AGD66
    VERTICAL DATUM:
    NUMBER OF REACHES: 19
    NUMBER OF CROSS-SECTIONS: 14206
    END HEADER:


    Only the SURFACE LINE data of the following form will be utilised

    CROSS-SECTION:
    STREAM ID:Southern-Wungong
    REACH ID:Southern-Wungong
    STATION:19040.*
    CUT LINE:
    405548.671603161 , 6438142.7594925
    405734.536092045 , 6438326.10404912
    405745.130459356 , 6438331.48627354
    405813.89633823 , 6438368.6272789
    SURFACE LINE:
    405548.67, 6438142.76, 35.37
    405552.24, 6438146.28, 35.41
    405554.78, 6438148.78, 35.44
    405555.80, 6438149.79, 35.44
    405559.37, 6438153.31, 35.45
    405560.88, 6438154.81, 35.44
    405562.93, 6438156.83, 35.42
    405566.50, 6438160.35, 35.38
    405566.99, 6438160.83, 35.37
    ...
    END CROSS-SECTION

    Convert to NetCDF pts format which is

    points: (Nx2) Float array
    elevation: N Float array
    """

    import os
    from Scientific.IO.NetCDF import NetCDFFile
    from Numeric import Float, zeros, reshape

    root = basename_in

    # Get ASCII file
    infile = open(root + '.sdf', 'r') # Open SDF file for read

    if verbose: print 'Reading DEM from %s' %(root + '.sdf')

    lines = infile.readlines()
    infile.close()

    if verbose: print 'Converting to pts format'

    i = 0
    while lines[i].strip() == '' or lines[i].strip().startswith('#'):
        i += 1

    assert lines[i].strip().upper() == 'BEGIN HEADER:'
    i += 1

    assert lines[i].strip().upper().startswith('UNITS:')
    units = lines[i].strip().split()[1]
    i += 1

    assert lines[i].strip().upper().startswith('DTM TYPE:')
    i += 1

    assert lines[i].strip().upper().startswith('DTM:')
    i += 1

    assert lines[i].strip().upper().startswith('STREAM')
    i += 1

    assert lines[i].strip().upper().startswith('CROSS')
    i += 1

    assert lines[i].strip().upper().startswith('MAP PROJECTION:')
    projection = lines[i].strip().split(':')[1]
    i += 1

    assert lines[i].strip().upper().startswith('PROJECTION ZONE:')
    zone = int(lines[i].strip().split(':')[1])
    i += 1

    assert lines[i].strip().upper().startswith('DATUM:')
    datum = lines[i].strip().split(':')[1]
    i += 1

    assert lines[i].strip().upper().startswith('VERTICAL DATUM:')
    i += 1

    assert lines[i].strip().upper().startswith('NUMBER OF REACHES:')
    i += 1

    assert lines[i].strip().upper().startswith('NUMBER OF CROSS-SECTIONS:')
    number_of_cross_sections = int(lines[i].strip().split(':')[1])
    i += 1


    # Now read all points
    points = []
    elevation = []
    for j, entries in enumerate(_read_hecras_cross_sections(lines[i:])):
        for k, entry in enumerate(entries):
            points.append(entry[:2])
            elevation.append(entry[2])


    msg = 'Actual number of cross sections == %d, reported as %d'\
          %(j+1, number_of_cross_sections)
    assert j+1 == number_of_cross_sections, msg

    # Get output file
    if basename_out is None:
        ptsname = root + '.pts'
    else:
        ptsname = basename_out + '.pts'

    geo_ref = Geo_reference(zone, 0, 0, datum, projection, units)
    geo = Geospatial_data(points, {"elevation":elevation},
                          verbose=verbose, geo_reference=geo_ref)
    geo.export_points_file(ptsname)

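# Illustrative sketch only (not part of the original module): a plausible
# call converting a HEC-RAS export to a pts file. The basename
# 'river_xsections' is hypothetical and assumes river_xsections.sdf exists.
def _example_hecras_cross_sections2pts():
    hecras_cross_sections2pts('river_xsections',
                              basename_out='river_elevation',
                              verbose=True)
    # Produces river_elevation.pts with points and an 'elevation' attribute.
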
def export_grid(basename_in, extra_name_out = None,
                quantities = None, # defaults to elevation
                timestep = None,
                reduction = None,
                cellsize = 10,
                NODATA_value = -9999,
                easting_min = None,
                easting_max = None,
                northing_min = None,
                northing_max = None,
                verbose = False,
                origin = None,
                datum = 'WGS84',
                format = 'ers'):
    """Wrapper for sww2dem - see sww2dem to find out what most of the
    parameters do.

    Quantities is a list of quantities. Each quantity will be
    calculated for each sww file.

    Returns the basenames of the files produced, each made up of the
    directory and the full file name except the extension.

    One output file is produced per quantity for each input sww file.
    """

    if quantities is None:
        quantities = ['elevation']

    if type(quantities) is str:
        quantities = [quantities]

    # How many sww files are there?
    dir, base = os.path.split(basename_in)
    #print "basename_in",basename_in
    #print "base",base

    iterate_over = get_all_swwfiles(dir, base, verbose)

    if dir == "":
        dir = "." # Unix compatibility

    files_out = []
    #print 'sww_file',iterate_over
    for sww_file in iterate_over:
        for quantity in quantities:
            if extra_name_out is None:
                basename_out = sww_file + '_' + quantity
            else:
                basename_out = sww_file + '_' + quantity + '_' \
                               + extra_name_out
            #print "basename_out", basename_out

            file_out = sww2dem(dir+sep+sww_file, dir+sep+basename_out,
                               quantity,
                               timestep,
                               reduction,
                               cellsize,
                               NODATA_value,
                               easting_min,
                               easting_max,
                               northing_min,
                               northing_max,
                               verbose,
                               origin,
                               datum,
                               format)
            files_out.append(file_out)
    #print "files_out after", files_out
    return files_out

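# Illustrative sketch only (not part of the original module): export 10 m
# elevation and depth grids from every sww file matching a basename.
# 'outputs/my_scenario' is a hypothetical basename; 'stage-elevation' is
# evaluated by sww2dem as a quantity expression.
def _example_export_grid():
    files = export_grid('outputs/my_scenario',
                        quantities=['elevation', 'stage-elevation'],
                        cellsize=10,
                        reduction=max,
                        format='asc',
                        verbose=True)
    print 'Wrote grids:', files
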

def get_timeseries(production_dirs, output_dir, scenario_name, gauges_dir_name,
                   plot_quantity, generate_fig = False,
                   reportname = None, surface = False, time_min = None,
                   time_max = None, title_on = False, verbose = True,
                   nodes=None):
    """
    nodes - number of processes used.

    warning - this function has no tests
    """
    if reportname is None:
        report = False
    else:
        report = True

    if nodes is None:
        is_parallel = False
    else:
        is_parallel = True

    # Generate figures
    swwfiles = {}

    if is_parallel is True:
        for i in range(nodes):
            print 'Sending node %d of %d' %(i, nodes)
            swwfiles = {}
            if reportname is not None:
                reportname = reportname + '_%s' %(i)
            for label_id in production_dirs.keys():
                if label_id == 'boundaries':
                    swwfile = best_boundary_sww
                else:
                    file_loc = output_dir + label_id + sep
                    sww_extra = '_P%s_%s' %(i, nodes)
                    swwfile = file_loc + scenario_name + sww_extra + '.sww'
                print 'swwfile', swwfile
                swwfiles[swwfile] = label_id

            texname, elev_output = sww2timeseries(swwfiles,
                                                  gauges_dir_name,
                                                  production_dirs,
                                                  report = report,
                                                  reportname = reportname,
                                                  plot_quantity = plot_quantity,
                                                  generate_fig = generate_fig,
                                                  surface = surface,
                                                  time_min = time_min,
                                                  time_max = time_max,
                                                  title_on = title_on,
                                                  verbose = verbose)
    else:
        for label_id in production_dirs.keys():
            if label_id == 'boundaries':
                print 'boundaries'
                file_loc = project.boundaries_in_dir
                swwfile = project.boundaries_dir_name3 + '.sww'
                #  swwfile = boundary_dir_filename
            else:
                file_loc = output_dir + label_id + sep
                swwfile = file_loc + scenario_name + '.sww'
            swwfiles[swwfile] = label_id

        texname, elev_output = sww2timeseries(swwfiles,
                                              gauges_dir_name,
                                              production_dirs,
                                              report = report,
                                              reportname = reportname,
                                              plot_quantity = plot_quantity,
                                              generate_fig = generate_fig,
                                              surface = surface,
                                              time_min = time_min,
                                              time_max = time_max,
                                              title_on = title_on,
                                              verbose = verbose)

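# Illustrative sketch only (not part of the original module): a plausible
# serial call. The directory label, scenario name and gauge file are all
# hypothetical; note the function also relies on module-level names
# (e.g. a 'project' object) when a 'boundaries' label is present.
def _example_get_timeseries():
    production_dirs = {'20060101_run': 'Base case'} # label_id -> title
    get_timeseries(production_dirs,
                   output_dir='outputs' + sep,
                   scenario_name='my_scenario',
                   gauges_dir_name='gauges.csv',
                   plot_quantity=['stage'],
                   generate_fig=False,
                   verbose=True)
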


def sww2dem(basename_in, basename_out = None,
            quantity = None, # defaults to elevation
            timestep = None,
            reduction = None,
            cellsize = 10,
            NODATA_value = -9999,
            easting_min = None,
            easting_max = None,
            northing_min = None,
            northing_max = None,
            verbose = False,
            origin = None,
            datum = 'WGS84',
            format = 'ers'):

    """Read SWW file and convert to Digital Elevation Model format
    (.asc or .ers)

    Example (ASC):

    ncols         3121
    nrows         1800
    xllcorner     722000
    yllcorner     5893000
    cellsize      25
    NODATA_value  -9999
    138.3698 137.4194 136.5062 135.5558 ..........

    In the ASC case an accompanying file with the same basename_out but
    extension .prj is also written. It is used to fix the UTM zone, datum,
    false northings and eastings.

    The prj format is assumed to be as

    Projection    UTM
    Zone          56
    Datum         WGS84
    Zunits        NO
    Units         METERS
    Spheroid      WGS84
    Xshift        0.0000000000
    Yshift        10000000.0000000000
    Parameters


    The parameter quantity must be the name of an existing quantity or
    an expression involving existing quantities. The default is
    'elevation'. Quantity is not a list of quantities.

    If timestep (an index) is given, output quantity at that timestep.

    If reduction is given, use that to reduce quantity over all timesteps.

    datum is the geographical datum written to the output metadata
    (e.g. 'WGS84').

    format can be either 'asc' or 'ers'
    """

    import sys
    from Numeric import array, Float, concatenate, NewAxis, zeros, reshape, \
         sometrue
    from Numeric import array2string

    from anuga.utilities.polygon import inside_polygon, outside_polygon, \
         separate_points_by_polygon
    from anuga.abstract_2d_finite_volumes.util import \
         apply_expression_to_dictionary

    msg = 'Format must be either asc or ers'
    assert format.lower() in ['asc', 'ers'], msg


    false_easting = 500000
    false_northing = 10000000

    if quantity is None:
        quantity = 'elevation'

    if reduction is None:
        reduction = max

    if basename_out is None:
        basename_out = basename_in + '_%s' %quantity

    if quantity_formula.has_key(quantity):
        quantity = quantity_formula[quantity]

    swwfile = basename_in + '.sww'
    demfile = basename_out + '.' + format
    # Note the use of a .ers extension is optional (write_ermapper_grid will
    # deal with either option)

    #if verbose: bye = nsuadsfd[0] # Uncomment to check catching verbose errors

    # Read sww file
    if verbose:
        print 'Reading from %s' %swwfile
        print 'Output basename is %s' %basename_out

    from Scientific.IO.NetCDF import NetCDFFile
    fid = NetCDFFile(swwfile)

    # Get extent and reference
    x = fid.variables['x'][:]
    y = fid.variables['y'][:]
    volumes = fid.variables['volumes'][:]
    if timestep is not None:
        times = fid.variables['time'][timestep]
    else:
        times = fid.variables['time'][:]

    number_of_timesteps = fid.dimensions['number_of_timesteps']
    number_of_points = fid.dimensions['number_of_points']

    if origin is None:

        # Get geo_reference
        # sww files don't have to have a geo_ref
        try:
            geo_reference = Geo_reference(NetCDFObject=fid)
        except AttributeError, e:
            geo_reference = Geo_reference() # Default georef object

        xllcorner = geo_reference.get_xllcorner()
        yllcorner = geo_reference.get_yllcorner()
        zone = geo_reference.get_zone()
    else:
        zone = origin[0]
        xllcorner = origin[1]
        yllcorner = origin[2]



    # FIXME: Refactor using code from Interpolation_function.statistics
    # (in interpolate.py)
    # Something like print swwstats(swwname)
    if verbose:
        print '------------------------------------------------'
        print 'Statistics of SWW file:'
        print '  Name: %s' %swwfile
        print '  Reference:'
        print '    Lower left corner: [%f, %f]'\
              %(xllcorner, yllcorner)
        if timestep is not None:
            print '    Time: %f' %(times)
        else:
            print '    Start time: %f' %fid.starttime[0]
        print '  Extent:'
        print '    x [m] in [%f, %f], len(x) == %d'\
              %(min(x.flat), max(x.flat), len(x.flat))
        print '    y [m] in [%f, %f], len(y) == %d'\
              %(min(y.flat), max(y.flat), len(y.flat))
        if timestep is not None:
            print '    t [s] = %f, len(t) == %d' %(times, 1)
        else:
            print '    t [s] in [%f, %f], len(t) == %d'\
                  %(min(times), max(times), len(times))
        print '  Quantities [SI units]:'
        # Comment out for reduced memory consumption
        for name in ['stage', 'xmomentum', 'ymomentum']:
            q = fid.variables[name][:].flat
            if timestep is not None:
                q = q[timestep*len(x):(timestep+1)*len(x)]
            if verbose: print '    %s in [%f, %f]' %(name, min(q), max(q))
        for name in ['elevation']:
            q = fid.variables[name][:].flat
            if verbose: print '    %s in [%f, %f]' %(name, min(q), max(q))

    # Get quantity and reduce if applicable
    if verbose: print 'Processing quantity %s' %quantity

    # Turn NetCDF objects into Numeric arrays
    try:
        q = fid.variables[quantity][:]


    except:
        quantity_dict = {}
        for name in fid.variables.keys():
            quantity_dict[name] = fid.variables[name][:]
        # Convert quantity expression to quantities found in sww file
        q = apply_expression_to_dictionary(quantity, quantity_dict)
    #print "q.shape",q.shape
    if len(q.shape) == 2:
        # q has a time component and needs to be reduced along
        # the temporal dimension
        if verbose: print 'Reducing quantity %s' %quantity
        q_reduced = zeros(number_of_points, Float)

        if timestep is not None:
            for k in range(number_of_points):
                q_reduced[k] = q[timestep,k]
        else:
            for k in range(number_of_points):
                q_reduced[k] = reduction( q[:,k] )

        q = q_reduced

    # Post condition: Now q has dimension: number_of_points
    assert len(q.shape) == 1
    assert q.shape[0] == number_of_points

    if verbose:
        print 'Processed values for %s are in [%f, %f]' %(quantity, min(q), max(q))


    # Create grid and update xll/yll corner and x,y

    # Relative extent
    if easting_min is None:
        xmin = min(x)
    else:
        xmin = easting_min - xllcorner

    if easting_max is None:
        xmax = max(x)
    else:
        xmax = easting_max - xllcorner

    if northing_min is None:
        ymin = min(y)
    else:
        ymin = northing_min - yllcorner

    if northing_max is None:
        ymax = max(y)
    else:
        ymax = northing_max - yllcorner



    if verbose: print 'Creating grid'
    ncols = int((xmax-xmin)/cellsize)+1
    nrows = int((ymax-ymin)/cellsize)+1



    # New absolute reference and coordinates
    newxllcorner = xmin+xllcorner
    newyllcorner = ymin+yllcorner

    x = x+xllcorner-newxllcorner
    y = y+yllcorner-newyllcorner

    vertex_points = concatenate ((x[:, NewAxis], y[:, NewAxis]), axis = 1)
    assert len(vertex_points.shape) == 2

    grid_points = zeros ( (ncols*nrows, 2), Float )


    for i in xrange(nrows):
        if format.lower() == 'asc':
            yg = i*cellsize
        else:
            # This will flip the order of the y values for ers
            yg = (nrows-i)*cellsize

        for j in xrange(ncols):
            xg = j*cellsize
            k = i*ncols + j

            grid_points[k,0] = xg
            grid_points[k,1] = yg
    # Interpolate
    from anuga.fit_interpolate.interpolate import Interpolate

    # Remove loners from vertex_points, volumes here
    vertex_points, volumes = remove_lone_verts(vertex_points, volumes)
    #export_mesh_file('monkey.tsh',{'vertices':vertex_points, 'triangles':volumes})
    #import sys; sys.exit()
    interp = Interpolate(vertex_points, volumes, verbose = verbose)

    # Interpolate using quantity values
    if verbose: print 'Interpolating'
    grid_values = interp.interpolate(q, grid_points).flat


    if verbose:
        print 'Interpolated values are in [%f, %f]' %(min(grid_values),
                                                      max(grid_values))

    # Assign NODATA_value to all points outside bounding polygon
    # (from interpolation mesh)
    P = interp.mesh.get_boundary_polygon()
    outside_indices = outside_polygon(grid_points, P, closed=True)

    for i in outside_indices:
        grid_values[i] = NODATA_value




    if format.lower() == 'ers':
        # Setup ERS header information
        grid_values = reshape(grid_values, (nrows, ncols))
        header = {}
        header['datum'] = '"' + datum + '"'
        # FIXME The use of hardwired UTM and zone number needs to be made optional
        # FIXME Also need an automatic test for coordinate type (i.e. EN or LL)
        header['projection'] = '"UTM-' + str(zone) + '"'
        header['coordinatetype'] = 'EN'
        if header['coordinatetype'] == 'LL':
            header['longitude'] = str(newxllcorner)
            header['latitude'] = str(newyllcorner)
        elif header['coordinatetype'] == 'EN':
            header['eastings'] = str(newxllcorner)
            header['northings'] = str(newyllcorner)
        header['nullcellvalue'] = str(NODATA_value)
        header['xdimension'] = str(cellsize)
        header['ydimension'] = str(cellsize)
        header['value'] = '"' + quantity + '"'
        #header['celltype'] = 'IEEE8ByteReal' #FIXME: Breaks unit test


        # Write
        if verbose: print 'Writing %s' %demfile
        import ermapper_grids
        ermapper_grids.write_ermapper_grid(demfile, grid_values, header)

        fid.close()
    else:
        # Write to Ascii format

        # Write prj file
        prjfile = basename_out + '.prj'

        if verbose: print 'Writing %s' %prjfile
        prjid = open(prjfile, 'w')
        prjid.write('Projection %s\n' %'UTM')
        prjid.write('Zone %d\n' %zone)
        prjid.write('Datum %s\n' %datum)
        prjid.write('Zunits NO\n')
        prjid.write('Units METERS\n')
        prjid.write('Spheroid %s\n' %datum)
        prjid.write('Xshift %d\n' %false_easting)
        prjid.write('Yshift %d\n' %false_northing)
        prjid.write('Parameters\n')
        prjid.close()



        if verbose: print 'Writing %s' %demfile

        ascid = open(demfile, 'w')

        ascid.write('ncols %d\n' %ncols)
        ascid.write('nrows %d\n' %nrows)
        ascid.write('xllcorner %d\n' %newxllcorner)
        ascid.write('yllcorner %d\n' %newyllcorner)
        ascid.write('cellsize %f\n' %cellsize)
        ascid.write('NODATA_value %d\n' %NODATA_value)


        # Get bounding polygon from mesh
        #P = interp.mesh.get_boundary_polygon()
        #inside_indices = inside_polygon(grid_points, P)

        for i in range(nrows):
            if verbose and i%((nrows+10)/10)==0:
                print 'Doing row %d of %d' %(i, nrows)

            base_index = (nrows-i-1)*ncols

            row_values = grid_values[base_index:base_index+ncols]
            s = array2string(row_values, max_line_width=sys.maxint)
            ascid.write(s[1:-1] + '\n')


            #print
            #for j in range(ncols):
            #    index = base_index+j
            #    print grid_values[index],
            #    ascid.write('%f ' %grid_values[index])
            #ascid.write('\n')

        # Close
        ascid.close()
        fid.close()

    return basename_out


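# Illustrative sketch only (not part of the original module): export the
# maximum water depth over all timesteps as an ArcView ASCII grid.
# 'outputs/my_model' is a hypothetical basename assuming outputs/my_model.sww
# exists; 'stage-elevation' is evaluated as a quantity expression.
def _example_sww2dem():
    sww2dem('outputs/my_model',
            basename_out='outputs/my_model_depth_max',
            quantity='stage-elevation',
            reduction=max,
            cellsize=5,
            format='asc',
            verbose=True)

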
# Backwards compatibility
def sww2asc(basename_in, basename_out = None,
            quantity = None,
            timestep = None,
            reduction = None,
            cellsize = 10,
            verbose = False,
            origin = None):
    print 'sww2asc will soon be obsolete - please use sww2dem'
    sww2dem(basename_in,
            basename_out = basename_out,
            quantity = quantity,
            timestep = timestep,
            reduction = reduction,
            cellsize = cellsize,
            verbose = verbose,
            origin = origin,
            datum = 'WGS84',
            format = 'asc')

def sww2ers(basename_in, basename_out = None,
            quantity = None,
            timestep = None,
            reduction = None,
            cellsize = 10,
            verbose = False,
            origin = None,
            datum = 'WGS84'):
    print 'sww2ers will soon be obsolete - please use sww2dem'
    sww2dem(basename_in,
            basename_out = basename_out,
            quantity = quantity,
            timestep = timestep,
            reduction = reduction,
            cellsize = cellsize,
            verbose = verbose,
            origin = origin,
            datum = datum,
            format = 'ers')
################################# END COMPATIBILITY ##############



def sww2pts(basename_in, basename_out=None,
            data_points=None,
            quantity=None,
            timestep=None,
            reduction=None,
            NODATA_value=-9999,
            verbose=False,
            origin=None):
            #datum = 'WGS84')


    """Read SWW file and convert to interpolated values at selected points

    The parameter 'quantity' must be the name of an existing quantity or
    an expression involving existing quantities. The default is
    'elevation'.

    If timestep (an index) is given, output quantity at that timestep.

    If reduction is given, use that to reduce quantity over all timesteps.

    data_points (Nx2 array) gives locations of points where quantity is to
    be computed.
    """

    import sys
    from Numeric import array, Float, concatenate, NewAxis, zeros, reshape, sometrue
    from Numeric import array2string

    from anuga.utilities.polygon import inside_polygon, outside_polygon, separate_points_by_polygon
    from anuga.abstract_2d_finite_volumes.util import apply_expression_to_dictionary

    from anuga.geospatial_data.geospatial_data import Geospatial_data

    if quantity is None:
        quantity = 'elevation'

    if reduction is None:
        reduction = max

    if basename_out is None:
        basename_out = basename_in + '_%s' %quantity

    swwfile = basename_in + '.sww'
    ptsfile = basename_out + '.pts'

    # Read sww file
    if verbose: print 'Reading from %s' %swwfile
    from Scientific.IO.NetCDF import NetCDFFile
    fid = NetCDFFile(swwfile)

    # Get extent and reference
    x = fid.variables['x'][:]
    y = fid.variables['y'][:]
    volumes = fid.variables['volumes'][:]

    number_of_timesteps = fid.dimensions['number_of_timesteps']
    number_of_points = fid.dimensions['number_of_points']
    if origin is None:

        # Get geo_reference
        # sww files don't have to have a geo_ref
        try:
            geo_reference = Geo_reference(NetCDFObject=fid)
        except AttributeError, e:
            geo_reference = Geo_reference() # Default georef object

        xllcorner = geo_reference.get_xllcorner()
        yllcorner = geo_reference.get_yllcorner()
        zone = geo_reference.get_zone()
    else:
        zone = origin[0]
        xllcorner = origin[1]
        yllcorner = origin[2]



    # FIXME: Refactor using code from file_function.statistics
    # Something like print swwstats(swwname)
    if verbose:
        x = fid.variables['x'][:]
        y = fid.variables['y'][:]
        times = fid.variables['time'][:]
        print '------------------------------------------------'
        print 'Statistics of SWW file:'
        print '  Name: %s' %swwfile
        print '  Reference:'
        print '    Lower left corner: [%f, %f]'\
              %(xllcorner, yllcorner)
        print '    Start time: %f' %fid.starttime[0]
        print '  Extent:'
        print '    x [m] in [%f, %f], len(x) == %d'\
              %(min(x.flat), max(x.flat), len(x.flat))
        print '    y [m] in [%f, %f], len(y) == %d'\
              %(min(y.flat), max(y.flat), len(y.flat))
        print '    t [s] in [%f, %f], len(t) == %d'\
              %(min(times), max(times), len(times))
        print '  Quantities [SI units]:'
        for name in ['stage', 'xmomentum', 'ymomentum', 'elevation']:
            q = fid.variables[name][:].flat
            print '    %s in [%f, %f]' %(name, min(q), max(q))



    # Get quantity and reduce if applicable
    if verbose: print 'Processing quantity %s' %quantity

    # Turn NetCDF objects into Numeric arrays
    quantity_dict = {}
    for name in fid.variables.keys():
        quantity_dict[name] = fid.variables[name][:]



    # Convert quantity expression to quantities found in sww file
    q = apply_expression_to_dictionary(quantity, quantity_dict)



    if len(q.shape) == 2:
        # q has a time component and needs to be reduced along
        # the temporal dimension
        if verbose: print 'Reducing quantity %s' %quantity
        q_reduced = zeros(number_of_points, Float)

        for k in range(number_of_points):
            q_reduced[k] = reduction( q[:,k] )

        q = q_reduced

    # Post condition: Now q has dimension: number_of_points
    assert len(q.shape) == 1
    assert q.shape[0] == number_of_points


    if verbose:
        print 'Processed values for %s are in [%f, %f]' %(quantity, min(q), max(q))


    # Create grid and update xll/yll corner and x,y
    vertex_points = concatenate ((x[:, NewAxis], y[:, NewAxis]), axis = 1)
    assert len(vertex_points.shape) == 2

    # Interpolate
    from anuga.fit_interpolate.interpolate import Interpolate
    interp = Interpolate(vertex_points, volumes, verbose = verbose)

    # Interpolate using quantity values
    if verbose: print 'Interpolating'
    interpolated_values = interp.interpolate(q, data_points).flat


    if verbose:
        print 'Interpolated values are in [%f, %f]' %(min(interpolated_values),
                                                      max(interpolated_values))

    # Assign NODATA_value to all points outside bounding polygon
    # (from interpolation mesh)
    P = interp.mesh.get_boundary_polygon()
    outside_indices = outside_polygon(data_points, P, closed=True)

    for i in outside_indices:
        interpolated_values[i] = NODATA_value


    # Store results
    G = Geospatial_data(data_points=data_points,
                        attributes=interpolated_values)

    G.export_points_file(ptsfile, absolute = True)

    fid.close()

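# Illustrative sketch only (not part of the original module): sample the
# maximum stage at two hypothetical gauge locations (absolute UTM
# coordinates) and store them as my_model_stage.pts. Assumes my_model.sww
# exists.
def _example_sww2pts():
    gauges = [[382000.0, 6382000.0],
              [382500.0, 6382100.0]]
    sww2pts('my_model',
            data_points=gauges,
            quantity='stage',
            reduction=max,
            verbose=True)
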

def convert_dem_from_ascii2netcdf(basename_in, basename_out = None,
                                  use_cache = False,
                                  verbose = False):
    """Read Digital Elevation Model from the following ASCII format (.asc)

    Example:

    ncols         3121
    nrows         1800
    xllcorner     722000
    yllcorner     5893000
    cellsize      25
    NODATA_value  -9999
    138.3698 137.4194 136.5062 135.5558 ..........

    Convert basename_in + '.asc' to NetCDF format (.dem)
    mimicking the ASCII format closely.


    An accompanying file with same basename_in but extension .prj must exist
    and is used to fix the UTM zone, datum, false northings and eastings.

    The prj format is assumed to be as

    Projection    UTM
    Zone          56
    Datum         WGS84
    Zunits        NO
    Units         METERS
    Spheroid      WGS84
    Xshift        0.0000000000
    Yshift        10000000.0000000000
    Parameters
    """



    kwargs = {'basename_out': basename_out, 'verbose': verbose}

    if use_cache is True:
        from caching import cache
        result = cache(_convert_dem_from_ascii2netcdf, basename_in, kwargs,
                       dependencies = [basename_in + '.asc',
                                       basename_in + '.prj'],
                       verbose = verbose)

    else:
        result = _convert_dem_from_ascii2netcdf(basename_in, **kwargs)

    return result

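# Illustrative sketch only (not part of the original module): convert a
# hypothetical ArcView export (perth_dem.asc plus perth_dem.prj) to
# perth_dem.dem, caching the result so a re-run with unchanged inputs is
# essentially free.
def _example_convert_dem_from_ascii2netcdf():
    convert_dem_from_ascii2netcdf('perth_dem', use_cache=True, verbose=True)
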




def _convert_dem_from_ascii2netcdf(basename_in, basename_out = None,
                                   verbose = False):
    """Read Digital Elevation Model from the following ASCII format (.asc)

    Internal function. See public function convert_dem_from_ascii2netcdf
    for details.
    """

    import os
    from Scientific.IO.NetCDF import NetCDFFile
    from Numeric import Float, array

    #root, ext = os.path.splitext(basename_in)
    root = basename_in

    ###########################################
    # Read Meta data
    if verbose: print 'Reading METADATA from %s' %(root + '.prj')
    metadatafile = open(root + '.prj')
    metalines = metadatafile.readlines()
    metadatafile.close()

    L = metalines[0].strip().split()
    assert L[0].strip().lower() == 'projection'
    projection = L[1].strip()                   #TEXT

    L = metalines[1].strip().split()
    assert L[0].strip().lower() == 'zone'
    zone = int(L[1].strip())

    L = metalines[2].strip().split()
    assert L[0].strip().lower() == 'datum'
    datum = L[1].strip()                        #TEXT

    L = metalines[3].strip().split()
    assert L[0].strip().lower() == 'zunits'     #IGNORE
    zunits = L[1].strip()                       #TEXT

    L = metalines[4].strip().split()
    assert L[0].strip().lower() == 'units'
    units = L[1].strip()                        #TEXT

    L = metalines[5].strip().split()
    assert L[0].strip().lower() == 'spheroid'   #IGNORE
    spheroid = L[1].strip()                     #TEXT

    L = metalines[6].strip().split()
    assert L[0].strip().lower() == 'xshift'
    false_easting = float(L[1].strip())

    L = metalines[7].strip().split()
    assert L[0].strip().lower() == 'yshift'
    false_northing = float(L[1].strip())

    #print false_easting, false_northing, zone, datum


    ###########################################
    # Read DEM data

    datafile = open(basename_in + '.asc')

    if verbose: print 'Reading DEM from %s' %(basename_in + '.asc')
    lines = datafile.readlines()
    datafile.close()

    if verbose: print 'Got', len(lines), 'lines'

    ncols = int(lines[0].split()[1].strip())
    nrows = int(lines[1].split()[1].strip())

    # Do cellsize (line 4) before line 2 and 3
    cellsize = float(lines[4].split()[1].strip())

    # Checks suggested by Joaquim Luis
    # Our internal representation of xllcorner
    # and yllcorner is non-standard.
    xref = lines[2].split()
    if xref[0].strip() == 'xllcorner':
        xllcorner = float(xref[1].strip()) # + 0.5*cellsize # Correct offset
    elif xref[0].strip() == 'xllcenter':
        xllcorner = float(xref[1].strip())
    else:
        msg = 'Unknown keyword: %s' %xref[0].strip()
        raise Exception, msg

    yref = lines[3].split()
    if yref[0].strip() == 'yllcorner':
        yllcorner = float(yref[1].strip()) # + 0.5*cellsize # Correct offset
    elif yref[0].strip() == 'yllcenter':
        yllcorner = float(yref[1].strip())
    else:
        msg = 'Unknown keyword: %s' %yref[0].strip()
        raise Exception, msg


    NODATA_value = int(lines[5].split()[1].strip())

    assert len(lines) == nrows + 6


    ##########################################


    if basename_out is None:
        netcdfname = root + '.dem'
    else:
        netcdfname = basename_out + '.dem'

    if verbose: print 'Store to NetCDF file %s' %netcdfname
    # NetCDF file definition
    fid = NetCDFFile(netcdfname, 'w')

    # Create new file
    fid.institution = 'Geoscience Australia'
    fid.description = 'NetCDF DEM format for compact and portable storage ' +\
                      'of spatial point data'

    fid.ncols = ncols
    fid.nrows = nrows
    fid.xllcorner = xllcorner
    fid.yllcorner = yllcorner
    fid.cellsize = cellsize
    fid.NODATA_value = NODATA_value

    fid.zone = zone
    fid.false_easting = false_easting
    fid.false_northing = false_northing
    fid.projection = projection
    fid.datum = datum
    fid.units = units


    # Dimension definitions
    fid.createDimension('number_of_rows', nrows)
    fid.createDimension('number_of_columns', ncols)

    # Variable definitions
    fid.createVariable('elevation', Float, ('number_of_rows',
                                            'number_of_columns'))

    # Get handles to the variables
    elevation = fid.variables['elevation']

    # Store data
    n = len(lines[6:])
    for i, line in enumerate(lines[6:]):
        fields = line.split()
        if verbose and i%((n+10)/10)==0:
            print 'Processing row %d of %d' %(i, nrows)

        elevation[i, :] = array([float(x) for x in fields])

    fid.close()




def ferret2sww(basename_in, basename_out = None,
               verbose = False,
               minlat = None, maxlat = None,
               minlon = None, maxlon = None,
               mint = None, maxt = None, mean_stage = 0,
               origin = None, zscale = 1,
               fail_on_NaN = True,
               NaN_filler = 0,
               elevation = None,
               inverted_bathymetry = True
               ): #FIXME: Bathymetry should be obtained
                  #from MOST somehow.
                  #Alternatively from elsewhere
                  #or, as a last resort,
                  #specified here.
                  #The value of -100 will work
                  #for the Wollongong tsunami
                  #scenario but is very hacky
    """Convert MOST and 'Ferret' NetCDF format for wave propagation to
    sww format native to abstract_2d_finite_volumes.

    Specify only basename_in and read files of the form
    basefilename_ha.nc, basefilename_ua.nc, basefilename_va.nc containing
    relative height, x-velocity and y-velocity, respectively.

    Also convert latitude and longitude to UTM. All coordinates are
    assumed to be given in the GDA94 datum.

    min's and max's: If omitted the full extent is used.
    To include a value min may equal it, while max must exceed it.
    Lat and lon are assumed to be in decimal degrees.

    origin is a 3-tuple with geo referenced
    UTM coordinates (zone, easting, northing)

    nc format has values organised as HA[TIME, LATITUDE, LONGITUDE]
    which means that longitude is the fastest
    varying dimension (row major order, so to speak)

    ferret2sww uses grid points as vertices in a triangular grid
    counting vertices from lower left corner upwards, then right
    """

    import os
    from Scientific.IO.NetCDF import NetCDFFile
    from Numeric import Float, Int, Int32, searchsorted, zeros, array
    from Numeric import allclose, around

    precision = Float

    msg = 'Must use latitudes and longitudes for minlat, maxlon etc'

    if minlat is not None:
        assert -90 < minlat < 90, msg
    if maxlat is not None:
        assert -90 < maxlat < 90, msg
        if minlat is not None:
            assert maxlat > minlat
    if minlon is not None:
        assert -180 < minlon < 180, msg
    if maxlon is not None:
        assert -180 < maxlon < 180, msg
        if minlon is not None:
            assert maxlon > minlon



    # Get NetCDF data
    if verbose: print 'Reading files %s_*.nc' %basename_in
    #print "basename_in + '_ha.nc'",basename_in + '_ha.nc'
    file_h = NetCDFFile(basename_in + '_ha.nc', 'r') # Wave amplitude (cm)
    file_u = NetCDFFile(basename_in + '_ua.nc', 'r') # Velocity (x) (cm/s)
    file_v = NetCDFFile(basename_in + '_va.nc', 'r') # Velocity (y) (cm/s)
    file_e = NetCDFFile(basename_in + '_e.nc', 'r')  # Elevation (z) (m)

    if basename_out is None:
        swwname = basename_in + '.sww'
    else:
        swwname = basename_out + '.sww'

    # Get dimensions of file_h
    for dimension in file_h.dimensions.keys():
        if dimension[:3] == 'LON':
            dim_h_longitude = dimension
        if dimension[:3] == 'LAT':
            dim_h_latitude = dimension
        if dimension[:4] == 'TIME':
            dim_h_time = dimension

    #print 'long:', dim_h_longitude
    #print 'lats:', dim_h_latitude
    #print 'times:', dim_h_time

    times = file_h.variables[dim_h_time]
    latitudes = file_h.variables[dim_h_latitude]
    longitudes = file_h.variables[dim_h_longitude]

    # Get dimensions for file_e
    for dimension in file_e.dimensions.keys():
        if dimension[:3] == 'LON':
            dim_e_longitude = dimension
        if dimension[:3] == 'LAT':
            dim_e_latitude = dimension

    # Get dimensions for file_u
    for dimension in file_u.dimensions.keys():
        if dimension[:3] == 'LON':
            dim_u_longitude = dimension
        if dimension[:3] == 'LAT':
            dim_u_latitude = dimension
        if dimension[:4] == 'TIME':
            dim_u_time = dimension

    # Get dimensions for file_v
    for dimension in file_v.dimensions.keys():
        if dimension[:3] == 'LON':
            dim_v_longitude = dimension
        if dimension[:3] == 'LAT':
            dim_v_latitude = dimension
        if dimension[:4] == 'TIME':
            dim_v_time = dimension


    # Precision used by MOST for lat/lon is 4 or 5 decimals
    e_lat = around(file_e.variables[dim_e_latitude][:], 5)
    e_lon = around(file_e.variables[dim_e_longitude][:], 5)

    # Check that files are compatible
    assert allclose(latitudes, file_u.variables[dim_u_latitude])
    assert allclose(latitudes, file_v.variables[dim_v_latitude])
    assert allclose(latitudes, e_lat)

    assert allclose(longitudes, file_u.variables[dim_u_longitude])
    assert allclose(longitudes, file_v.variables[dim_v_longitude])
    assert allclose(longitudes, e_lon)

    if mint is None:
        jmin = 0
        mint = times[0]
    else:
        jmin = searchsorted(times, mint)

    if maxt is None:
        jmax = len(times)
        maxt = times[-1]
    else:
        jmax = searchsorted(times, maxt)

    #print "latitudes[:]",latitudes[:]
    #print "longitudes[:]",longitudes[:]
    kmin, kmax, lmin, lmax = _get_min_max_indexes(latitudes[:],
                                                  longitudes[:],
                                                  minlat, maxlat,
                                                  minlon, maxlon)


    times = times[jmin:jmax]
    latitudes = latitudes[kmin:kmax]
    longitudes = longitudes[lmin:lmax]

    #print "latitudes[:]",latitudes[:]
    #print "longitudes[:]",longitudes[:]

    if verbose: print 'cropping'
    zname = 'ELEVATION'

    amplitudes = file_h.variables['HA'][jmin:jmax, kmin:kmax, lmin:lmax]
    uspeed = file_u.variables['UA'][jmin:jmax, kmin:kmax, lmin:lmax] # Lon
    vspeed = file_v.variables['VA'][jmin:jmax, kmin:kmax, lmin:lmax] # Lat
    elevations = file_e.variables[zname][kmin:kmax, lmin:lmax]

    #    if latitudes2[0]==latitudes[0] and latitudes2[-1]==latitudes[-1]:
    #        elevations = file_e.variables['ELEVATION'][kmin:kmax, lmin:lmax]
    #    elif latitudes2[0]==latitudes[-1] and latitudes2[-1]==latitudes[0]:
    #        from Numeric import asarray
    #        elevations=elevations.tolist()
    #        elevations.reverse()
    #        elevations=asarray(elevations)
    #    else:
    #        from Numeric import asarray
    #        elevations=elevations.tolist()
    #        elevations.reverse()
    #        elevations=asarray(elevations)
    #        'print hmmm'



    # Get missing values
    nan_ha = file_h.variables['HA'].missing_value[0]
    nan_ua = file_u.variables['UA'].missing_value[0]
    nan_va = file_v.variables['VA'].missing_value[0]
    if hasattr(file_e.variables[zname], 'missing_value'):
        nan_e = file_e.variables[zname].missing_value[0]
    else:
        nan_e = None

    # Cleanup
    from Numeric import sometrue

    missing = (amplitudes == nan_ha)
    if sometrue(missing):
        if fail_on_NaN:
            msg = 'NetCDFFile %s contains missing values'\
                  %(basename_in+'_ha.nc')
            raise DataMissingValuesError, msg
        else:
            amplitudes = amplitudes*(missing==0) + missing*NaN_filler

    missing = (uspeed == nan_ua)
    if sometrue(missing):
        if fail_on_NaN:
            msg = 'NetCDFFile %s contains missing values'\
                  %(basename_in+'_ua.nc')
            raise DataMissingValuesError, msg
        else:
            uspeed = uspeed*(missing==0) + missing*NaN_filler

    missing = (vspeed == nan_va)
    if sometrue(missing):
        if fail_on_NaN:
            msg = 'NetCDFFile %s contains missing values'\
                  %(basename_in+'_va.nc')
            raise DataMissingValuesError, msg
        else:
            vspeed = vspeed*(missing==0) + missing*NaN_filler


    missing = (elevations == nan_e)
    if sometrue(missing):
        if fail_on_NaN:
            msg = 'NetCDFFile %s contains missing values'\
                  %(basename_in+'_e.nc')
            raise DataMissingValuesError, msg
        else:
            elevations = elevations*(missing==0) + missing*NaN_filler

2839 | ####### |
---|
2840 | |
---|
2841 | |
---|
2842 | |
---|
2843 | number_of_times = times.shape[0] |
---|
2844 | number_of_latitudes = latitudes.shape[0] |
---|
2845 | number_of_longitudes = longitudes.shape[0] |
---|
2846 | |
---|
2847 | assert amplitudes.shape[0] == number_of_times |
---|
2848 | assert amplitudes.shape[1] == number_of_latitudes |
---|
2849 | assert amplitudes.shape[2] == number_of_longitudes |
---|
2850 | |
---|
2851 | if verbose: |
---|
2852 | print '------------------------------------------------' |
---|
2853 | print 'Statistics:' |
---|
2854 | print ' Extent (lat/lon):' |
---|
2855 | print ' lat in [%f, %f], len(lat) == %d'\ |
---|
2856 | %(min(latitudes.flat), max(latitudes.flat), |
---|
2857 | len(latitudes.flat)) |
---|
2858 | print ' lon in [%f, %f], len(lon) == %d'\ |
---|
2859 | %(min(longitudes.flat), max(longitudes.flat), |
---|
2860 | len(longitudes.flat)) |
---|
2861 | print ' t in [%f, %f], len(t) == %d'\ |
---|
2862 | %(min(times.flat), max(times.flat), len(times.flat)) |
---|
2863 | |
---|
2864 | q = amplitudes.flat |
---|
2865 | name = 'Amplitudes (ha) [cm]' |
---|
2866 | print ' %s in [%f, %f]' %(name, min(q), max(q)) |
---|
2867 | |
---|
2868 | q = uspeed.flat |
---|
2869 | name = 'Speeds (ua) [cm/s]' |
---|
2870 | print ' %s in [%f, %f]' %(name, min(q), max(q)) |
---|
2871 | |
---|
2872 | q = vspeed.flat |
---|
2873 | name = 'Speeds (va) [cm/s]' |
---|
2874 | print ' %s in [%f, %f]' %(name, min(q), max(q)) |
---|
2875 | |
---|
2876 | q = elevations.flat |
---|
2877 | name = 'Elevations (e) [m]' |
---|
2878 | print ' %s in [%f, %f]' %(name, min(q), max(q)) |
---|
2879 | |
---|
2880 | |
---|
2881 | # print number_of_latitudes, number_of_longitudes |
---|
2882 | number_of_points = number_of_latitudes*number_of_longitudes |
---|
2883 | number_of_volumes = (number_of_latitudes-1)*(number_of_longitudes-1)*2 |
---|
2884 | |
---|
2885 | |
---|
2886 | file_h.close() |
---|
2887 | file_u.close() |
---|
2888 | file_v.close() |
---|
2889 | file_e.close() |
---|
2890 | |
---|
2891 | |
---|
2892 | # NetCDF file definition |
---|
2893 | outfile = NetCDFFile(swwname, 'w') |
---|
2894 | |
---|
2895 | description = 'Converted from Ferret files: %s, %s, %s, %s'\ |
---|
2896 | %(basename_in + '_ha.nc', |
---|
                            basename_in + '_ua.nc',
                            basename_in + '_va.nc',
                            basename_in + '_e.nc')

    # Create new file
    starttime = times[0]

    sww = Write_sww()
    sww.store_header(outfile, times, number_of_volumes,
                     number_of_points, description=description,
                     verbose=verbose, sww_precision=Float)

    # Store
    from anuga.coordinate_transforms.redfearn import redfearn
    x = zeros(number_of_points, Float)  #Easting
    y = zeros(number_of_points, Float)  #Northing

    if verbose: print 'Making triangular grid'

    # Check zone boundaries
    refzone, _, _ = redfearn(latitudes[0], longitudes[0])

    vertices = {}
    i = 0
    for k, lat in enumerate(latitudes):       #Y direction
        for l, lon in enumerate(longitudes):  #X direction

            vertices[l,k] = i

            zone, easting, northing = redfearn(lat, lon)

            msg = 'Zone boundary crossed at longitude = %s' %lon
            #assert zone == refzone, msg
            x[i] = easting
            y[i] = northing
            i += 1

    #Construct 2 triangles per 'rectangular' element
    volumes = []
    for l in range(number_of_longitudes-1):    #X direction
        for k in range(number_of_latitudes-1): #Y direction
            v1 = vertices[l,k+1]
            v2 = vertices[l,k]
            v3 = vertices[l+1,k+1]
            v4 = vertices[l+1,k]

            volumes.append([v1,v2,v3]) #Upper element
            volumes.append([v4,v3,v2]) #Lower element

    volumes = array(volumes)

    if origin is None:
        origin = Geo_reference(refzone, min(x), min(y))
    geo_ref = write_NetCDF_georeference(origin, outfile)

    if elevation is not None:
        z = elevation
    else:
        if inverted_bathymetry:
            z = -1*elevations
        else:
            z = elevations
    #FIXME: z should be obtained from MOST and passed in here

    #FIXME: use the Write_sww instance (sww) to write this info
    from Numeric import resize
    z = resize(z, outfile.variables['z'][:].shape)
    outfile.variables['x'][:] = x - geo_ref.get_xllcorner()
    outfile.variables['y'][:] = y - geo_ref.get_yllcorner()
    outfile.variables['z'][:] = z   #FIXME HACK for backwards compat.
    outfile.variables['elevation'][:] = z
    outfile.variables['volumes'][:] = volumes.astype(Int32) #For Opteron 64

    #Time stepping
    stage = outfile.variables['stage']
    xmomentum = outfile.variables['xmomentum']
    ymomentum = outfile.variables['ymomentum']

    if verbose: print 'Converting quantities'
    n = len(times)
    for j in range(n):
        if verbose and j%((n+10)/10)==0: print '  Doing %d of %d' %(j, n)
        i = 0
        for k in range(number_of_latitudes):      #Y direction
            for l in range(number_of_longitudes): #X direction
                w = zscale*amplitudes[j,k,l]/100 + mean_stage
                stage[j,i] = w
                h = w - z[i]
                xmomentum[j,i] = uspeed[j,k,l]/100*h
                ymomentum[j,i] = vspeed[j,k,l]/100*h
                i += 1

    #FIXME: Refactor using code from file_function.statistics
    #Something like print swwstats(swwname)
    if verbose:
        x = outfile.variables['x'][:]
        y = outfile.variables['y'][:]
        print '------------------------------------------------'
        print 'Statistics of output file:'
        print '  Name: %s' %swwname
        print '  Reference:'
        print '    Lower left corner: [%f, %f]'\
              %(geo_ref.get_xllcorner(), geo_ref.get_yllcorner())
        print '    Start time: %f' %starttime
        print '    Min time: %f' %mint
        print '    Max time: %f' %maxt
        print '  Extent:'
        print '    x [m] in [%f, %f], len(x) == %d'\
              %(min(x.flat), max(x.flat), len(x.flat))
        print '    y [m] in [%f, %f], len(y) == %d'\
              %(min(y.flat), max(y.flat), len(y.flat))
        print '    t [s] in [%f, %f], len(t) == %d'\
              %(min(times), max(times), len(times))
        print '  Quantities [SI units]:'
        for name in ['stage', 'xmomentum', 'ymomentum', 'elevation']:
            q = outfile.variables[name][:].flat
            print '    %s in [%f, %f]' %(name, min(q), max(q))

    outfile.close()

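# Usage sketch for the converter above (the basename is illustrative; it
# assumes the four MOST/ferret NetCDF inputs 'most_ha.nc', 'most_ua.nc',
# 'most_va.nc' and 'most_e.nc' named in the description string):
#
#     ferret2sww('most',
#                minlat=-35.5, maxlat=-34.5,
#                minlon=150.0, maxlon=151.0,
#                mean_stage=0.0, zscale=1,
#                verbose=True)
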
def timefile2netcdf(filename, quantity_names=None, time_as_seconds=False):
    """Template for converting typical text files with time series to
    NetCDF tms file.

    The file format is assumed to be either two fields separated by a comma:

        time [DD/MM/YY hh:mm:ss], value0 value1 value2 ...

    E.g.

        31/08/04 00:00:00, 1.328223 0 0
        31/08/04 00:15:00, 1.292912 0 0

    or time (seconds), value0 value1 value2 ...

        0.0, 1.328223 0 0
        0.1, 1.292912 0 0

    Either form will provide a time dependent function f(t) with, in this
    example, three attributes.

    filename is assumed to be the rootname with extensions .txt and .tms
    """

    import time, calendar
    from Numeric import array
    from anuga.config import time_format
    from anuga.utilities.numerical_tools import ensure_numeric

    fid = open(filename + '.txt')
    line = fid.readline()
    fid.close()

    fields = line.split(',')
    msg = 'File %s must have the format date, value0 value1 value2 ...'\
          %filename
    assert len(fields) == 2, msg

    if not time_as_seconds:
        try:
            starttime = calendar.timegm(time.strptime(fields[0], time_format))
        except ValueError:
            msg = 'First field in file %s must be' %filename
            msg += ' date-time with format %s.\n' %time_format
            msg += 'I got %s instead.' %fields[0]
            raise DataTimeError, msg
    else:
        try:
            starttime = float(fields[0])
        except ValueError:
            msg = 'First field in file %s must be a number' %filename
            msg += ' (time in seconds). I got %s instead.' %fields[0]
            raise DataTimeError, msg

    #Split values
    values = []
    for value in fields[1].split():
        values.append(float(value))

    q = ensure_numeric(values)

    msg = 'ERROR: File must contain at least one independent value'
    assert len(q.shape) == 1, msg

    #Read times proper
    from Numeric import zeros, Float, alltrue
    from anuga.config import time_format
    import time, calendar

    fid = open(filename + '.txt')
    lines = fid.readlines()
    fid.close()

    N = len(lines)
    d = len(q)

    T = zeros(N, Float)      #Time
    Q = zeros((N, d), Float) #Values

    for i, line in enumerate(lines):
        fields = line.split(',')
        if not time_as_seconds:
            realtime = calendar.timegm(time.strptime(fields[0], time_format))
        else:
            realtime = float(fields[0])
        T[i] = realtime - starttime

        for j, value in enumerate(fields[1].split()):
            Q[i, j] = float(value)

    msg = 'File %s must list time as a monotonically ' %filename
    msg += 'increasing sequence'
    assert alltrue(T[1:] - T[:-1] > 0), msg

    #Create NetCDF file
    from Scientific.IO.NetCDF import NetCDFFile

    fid = NetCDFFile(filename + '.tms', 'w')

    fid.institution = 'Geoscience Australia'
    fid.description = 'Time series'

    #Reference point
    #Start time in seconds since the epoch (midnight 1/1/1970)
    #FIXME: Use Georef
    fid.starttime = starttime

    # dimension definitions
    fid.createDimension('number_of_timesteps', len(T))

    fid.createVariable('time', Float, ('number_of_timesteps',))

    fid.variables['time'][:] = T

    for i in range(Q.shape[1]):
        try:
            name = quantity_names[i]
        except:
            name = 'Attribute%d' %i

        fid.createVariable(name, Float, ('number_of_timesteps',))
        fid.variables[name][:] = Q[:,i]

    fid.close()

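# Usage sketch (assumes a file 'gauge.txt' in the comma-separated format
# documented above; the column names are illustrative):
#
#     timefile2netcdf('gauge', quantity_names=['stage', 'ua', 'va'])
#
# This would create 'gauge.tms' with one NetCDF variable per column and
# times stored relative to the first record.
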
def extent_sww(file_name):
    """
    Read in an sww file.

    Input:
        file_name - the sww file

    Output:
        A list [min(x), max(x), min(y), max(y), min(stage), max(stage)]
        giving the spatial extent of the mesh and the range of the stage.
    """

    from Scientific.IO.NetCDF import NetCDFFile

    #Check contents
    #Get NetCDF
    fid = NetCDFFile(file_name, 'r')

    # Get the variables
    x = fid.variables['x'][:]
    y = fid.variables['y'][:]
    stage = fid.variables['stage'][:]

    fid.close()

    return [min(x), max(x), min(y), max(y), min(stage.flat), max(stage.flat)]

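# Example (a sketch; 'run.sww' is a placeholder name):
#
#     xmin, xmax, ymin, ymax, stage_min, stage_max = extent_sww('run.sww')
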
def sww2domain(filename, boundary=None, t=None,
               fail_if_NaN=True, NaN_filler=0,
               verbose=False, very_verbose=False):
    """
    Usage: domain = sww2domain('file.sww', t=time (default = last time in file))

    Boundary is not recommended if domain.smooth is not selected, as it
    uses unique coordinates, but not unique boundaries. This means that
    the boundary file will not be compatible with the coordinates, and will
    give a different final boundary, or crash.
    """

    #Initialise NaN (ferret's default missing value).
    NaN = 9.969209968386869e+036

    from Scientific.IO.NetCDF import NetCDFFile
    from shallow_water import Domain
    from Numeric import asarray, transpose, resize

    if verbose: print 'Reading from ', filename
    fid = NetCDFFile(filename, 'r')    #Open existing file for read
    time = fid.variables['time']       #Timesteps
    if t is None:
        t = time[-1]
    time_interp = get_time_interp(time, t)

    # Get the variables as Numeric arrays
    x = fid.variables['x'][:]               #x-coordinates of vertices
    y = fid.variables['y'][:]               #y-coordinates of vertices
    elevation = fid.variables['elevation']  #Elevation
    stage = fid.variables['stage']          #Water level
    xmomentum = fid.variables['xmomentum']  #Momentum in the x-direction
    ymomentum = fid.variables['ymomentum']  #Momentum in the y-direction

    starttime = fid.starttime[0]
    volumes = fid.variables['volumes'][:]   #Connectivity
    coordinates = transpose(asarray([x.tolist(), y.tolist()]))

    conserved_quantities = []
    interpolated_quantities = {}
    other_quantities = []

    # Get geo_reference
    # sww files don't have to have a geo_ref
    try:
        geo_reference = Geo_reference(NetCDFObject=fid)
    except: #AttributeError, e:
        geo_reference = None

    if verbose: print '    getting quantities'
    for quantity in fid.variables.keys():
        dimensions = fid.variables[quantity].dimensions
        if 'number_of_timesteps' in dimensions:
            conserved_quantities.append(quantity)
            interpolated_quantities[quantity] = \
                interpolated_quantity(fid.variables[quantity][:], time_interp)
        else:
            other_quantities.append(quantity)

    other_quantities.remove('x')
    other_quantities.remove('y')
    other_quantities.remove('z')
    other_quantities.remove('volumes')
    try:
        other_quantities.remove('stage_range')
        other_quantities.remove('xmomentum_range')
        other_quantities.remove('ymomentum_range')
        other_quantities.remove('elevation_range')
    except:
        pass

    conserved_quantities.remove('time')

    if verbose: print '    building domain'
    # From domain.Domain:
    # domain = Domain(coordinates, volumes,
    #                 conserved_quantities = conserved_quantities,
    #                 other_quantities = other_quantities, zone=zone,
    #                 xllcorner=xllcorner, yllcorner=yllcorner)

    # From shallow_water.Domain:
    coordinates = coordinates.tolist()
    volumes = volumes.tolist()
    #FIXME: should this be in mesh? (Peter Row)
    if fid.smoothing == 'Yes':
        unique = False
    else:
        unique = True
    if unique:
        coordinates, volumes, boundary = weed(coordinates, volumes, boundary)

    try:
        domain = Domain(coordinates, volumes, boundary)
    except AssertionError, e:
        fid.close()
        msg = 'Domain could not be created: %s. ' %e
        msg += 'Perhaps use "fail_if_NaN=False and NaN_filler = ..."'
        raise DataDomainError, msg

    if not boundary is None:
        domain.boundary = boundary

    domain.geo_reference = geo_reference

    domain.starttime = float(starttime) + float(t)
    domain.time = 0.0

    for quantity in other_quantities:
        try:
            NaN = fid.variables[quantity].missing_value
        except:
            pass #quantity has no missing_value number
        X = fid.variables[quantity][:]
        if very_verbose:
            print '       ', quantity
            print '        NaN =', NaN
            print '        max(X)'
            print '       ', max(X)
            print '        max(X)==NaN'
            print '       ', max(X)==NaN
            print ''
        if (max(X)==NaN) or (min(X)==NaN):
            if fail_if_NaN:
                msg = 'quantity "%s" contains no_data entry' %quantity
                raise DataMissingValuesError, msg
            else:
                data = (X!=NaN)
                X = (X*data) + (data==0)*NaN_filler
        if unique:
            X = resize(X, (len(X)/3, 3))
        domain.set_quantity(quantity, X)

    for quantity in conserved_quantities:
        try:
            NaN = fid.variables[quantity].missing_value
        except:
            pass #quantity has no missing_value number
        X = interpolated_quantities[quantity]
        if very_verbose:
            print '       ', quantity
            print '        NaN =', NaN
            print '        max(X)'
            print '       ', max(X)
            print '        max(X)==NaN'
            print '       ', max(X)==NaN
            print ''
        if (max(X)==NaN) or (min(X)==NaN):
            if fail_if_NaN:
                msg = 'quantity "%s" contains no_data entry' %quantity
                raise DataMissingValuesError, msg
            else:
                data = (X!=NaN)
                X = (X*data) + (data==0)*NaN_filler
        if unique:
            X = resize(X, (X.shape[0]/3, 3))
        domain.set_quantity(quantity, X)

    fid.close()
    return domain

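# Typical use is restarting a model from a stored timeslice (a sketch;
# 'run.sww' is a placeholder):
#
#     domain = sww2domain('run.sww', t=3600.0,
#                         fail_if_NaN=False, NaN_filler=0, verbose=True)
#
# Note that domain.starttime is shifted to starttime + t so the restarted
# evolution continues from the extracted instant.
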
def interpolated_quantity(saved_quantity, time_interp):
    """Given an index and ratio, interpolate quantity with respect to time."""

    index, ratio = time_interp
    Q = saved_quantity
    if ratio > 0:
        q = (1-ratio)*Q[index] + ratio*Q[index+1]
    else:
        q = Q[index]

    #Return vector of interpolated values
    return q

def get_time_interp(time, t=None):
    """Find the ratio and index for time interpolation.
    Borrowed from previous abstract_2d_finite_volumes code.
    """
    if t is None:
        t = time[-1]
        index = -1
        ratio = 0.
    else:
        T = time
        tau = t
        index = 0
        msg = 'Time interval derived from file %s [%s:%s]'\
              %('FIXMEfilename', T[0], T[-1])
        msg += ' does not match model time: %s' %tau
        if tau < time[0]: raise DataTimeError, msg
        if tau > time[-1]: raise DataTimeError, msg
        while tau > time[index]: index += 1
        while tau < time[index]: index -= 1
        if tau == time[index]:
            #Protect against case where tau == time[-1] (last time)
            # - also works in general when tau == time[i]
            ratio = 0
        else:
            #t is now between index and index+1
            ratio = (tau - time[index])/(time[index+1] - time[index])
    return (index, ratio)

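# Worked example of the two helpers above (values are illustrative):
# with time = [0.0, 10.0, 20.0] and t = 15.0, get_time_interp returns
# (index=1, ratio=0.5), and interpolated_quantity then blends the stored
# timeslices as 0.5*Q[1] + 0.5*Q[2]:
#
#     index, ratio = get_time_interp([0.0, 10.0, 20.0], 15.0)   # (1, 0.5)
#     q = interpolated_quantity(Q, (index, ratio))
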
def weed(coordinates, volumes, boundary=None):
    """Remove duplicate vertices and remap volume and boundary indices
    to the surviving unique points.
    """
    if type(coordinates) == ArrayType:
        coordinates = coordinates.tolist()
    if type(volumes) == ArrayType:
        volumes = volumes.tolist()

    unique = False
    point_dict = {}
    same_point = {}
    for i in range(len(coordinates)):
        point = tuple(coordinates[i])
        if point_dict.has_key(point):
            unique = True
            same_point[i] = point
            #to change all point i references to point j
        else:
            point_dict[point] = i
            same_point[i] = point

    coordinates = []
    i = 0
    for point in point_dict.keys():
        point = tuple(point)
        coordinates.append(list(point))
        point_dict[point] = i
        i += 1

    for volume in volumes:
        for i in range(len(volume)):
            index = volume[i]
            if index > -1:
                volume[i] = point_dict[same_point[index]]

    new_boundary = {}
    if not boundary is None:
        for segment in boundary.keys():
            point0 = point_dict[same_point[segment[0]]]
            point1 = point_dict[same_point[segment[1]]]
            label = boundary[segment]
            #FIXME: should the boundary attributes be concatenated
            #('exterior, pond') or replaced ('pond')? (Peter Row)

            if new_boundary.has_key((point0, point1)):
                new_boundary[(point0, point1)] = new_boundary[(point0, point1)]
                #+','+label

            elif new_boundary.has_key((point1, point0)):
                new_boundary[(point1, point0)] = new_boundary[(point1, point0)]
                #+','+label
            else:
                new_boundary[(point0, point1)] = label

        boundary = new_boundary

    return coordinates, volumes, boundary

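# Example of what weed does (a sketch): vertex 2 duplicates vertex 0, so it
# is merged away and the triangles are re-indexed to the surviving points:
#
#     coords = [[0.0, 0.0], [1.0, 0.0], [0.0, 0.0], [0.0, 1.0]]
#     volumes = [[0, 1, 3], [2, 1, 3]]
#     coords, volumes, boundary = weed(coords, volumes)
#     # len(coords) == 3 and both triangles now reference the same
#     # index wherever (0.0, 0.0) appeared.
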
def decimate_dem(basename_in, stencil, cellsize_new, basename_out=None,
                 verbose=False):
    """Read Digital Elevation model from the following NetCDF format (.dem)

    Example:

    ncols         3121
    nrows         1800
    xllcorner     722000
    yllcorner     5893000
    cellsize      25
    NODATA_value  -9999
    138.3698 137.4194 136.5062 135.5558 ..........

    Decimate data to cellsize_new using stencil and write to NetCDF dem format.
    """

    import os
    from Scientific.IO.NetCDF import NetCDFFile
    from Numeric import Float, zeros, sum, reshape, equal

    root = basename_in
    inname = root + '.dem'

    #Open existing netcdf file to read
    infile = NetCDFFile(inname, 'r')
    if verbose: print 'Reading DEM from %s' %inname

    #Read metadata
    ncols = infile.ncols[0]
    nrows = infile.nrows[0]
    xllcorner = infile.xllcorner[0]
    yllcorner = infile.yllcorner[0]
    cellsize = infile.cellsize[0]
    NODATA_value = infile.NODATA_value[0]
    zone = infile.zone[0]
    false_easting = infile.false_easting[0]
    false_northing = infile.false_northing[0]
    projection = infile.projection
    datum = infile.datum
    units = infile.units

    dem_elevation = infile.variables['elevation']

    #Get output file name
    if basename_out == None:
        outname = root + '_' + repr(cellsize_new) + '.dem'
    else:
        outname = basename_out + '.dem'

    if verbose: print 'Write decimated NetCDF file to %s' %outname

    #Determine some dimensions for decimated grid
    (nrows_stencil, ncols_stencil) = stencil.shape
    x_offset = ncols_stencil / 2
    y_offset = nrows_stencil / 2
    cellsize_ratio = int(cellsize_new / cellsize)
    ncols_new = 1 + (ncols - ncols_stencil) / cellsize_ratio
    nrows_new = 1 + (nrows - nrows_stencil) / cellsize_ratio

    #Open netcdf file for output
    outfile = NetCDFFile(outname, 'w')

    #Create new file
    outfile.institution = 'Geoscience Australia'
    outfile.description = 'NetCDF DEM format for compact and portable ' +\
                          'storage of spatial point data'

    #Georeferencing
    outfile.zone = zone
    outfile.projection = projection
    outfile.datum = datum
    outfile.units = units

    outfile.cellsize = cellsize_new
    outfile.NODATA_value = NODATA_value
    outfile.false_easting = false_easting
    outfile.false_northing = false_northing

    outfile.xllcorner = xllcorner + (x_offset * cellsize)
    outfile.yllcorner = yllcorner + (y_offset * cellsize)
    outfile.ncols = ncols_new
    outfile.nrows = nrows_new

    # dimension definition
    outfile.createDimension('number_of_points', nrows_new*ncols_new)

    # variable definition
    outfile.createVariable('elevation', Float, ('number_of_points',))

    # Get handle to the variable
    elevation = outfile.variables['elevation']

    dem_elevation_r = reshape(dem_elevation, (nrows, ncols))

    #Store data
    global_index = 0
    for i in range(nrows_new):
        if verbose: print 'Processing row %d of %d' %(i, nrows_new)
        lower_index = global_index
        telev = zeros(ncols_new, Float)
        local_index = 0
        trow = i * cellsize_ratio

        for j in range(ncols_new):
            tcol = j * cellsize_ratio
            tmp = dem_elevation_r[trow:trow+nrows_stencil,
                                  tcol:tcol+ncols_stencil]

            #If dem contains 1 or more NODATA_values set value in
            #decimated dem to NODATA_value, else compute decimated
            #value using stencil
            if sum(sum(equal(tmp, NODATA_value))) > 0:
                telev[local_index] = NODATA_value
            else:
                telev[local_index] = sum(sum(tmp * stencil))

            global_index += 1
            local_index += 1

        upper_index = global_index

        elevation[lower_index:upper_index] = telev

    assert global_index == nrows_new*ncols_new, \
           'index not equal to number of points'

    infile.close()
    outfile.close()

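# A minimal decimation sketch (assumes 'topo.dem' exists at 25 m cellsize):
# a uniform 3x3 averaging stencil, whose weights sum to 1, reduces the grid
# to 75 m cells via sum(sum(tmp * stencil)) above.
#
#     from Numeric import ones, Float
#     stencil = ones((3, 3), Float) / 9.0
#     decimate_dem('topo', stencil, 75, basename_out='topo_75')
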
def tsh2sww(filename, verbose=False):
    """Convert a tsh/msh mesh file to an sww file.

    Mainly useful to check whether a tsh/msh file 'looks' good.
    """

    if verbose == True: print 'Creating domain from', filename
    domain = pmesh_to_domain_instance(filename, Domain)
    if verbose == True: print "Number of triangles = ", len(domain)

    domain.smooth = True
    domain.format = 'sww'   #Native netcdf visualisation format
    file_path, filename = path.split(filename)
    filename, ext = path.splitext(filename)
    domain.set_name(filename)
    domain.reduction = mean
    if verbose == True: print "file_path", file_path
    if file_path == "": file_path = "."
    domain.set_datadir(file_path)

    if verbose == True:
        print "Output written to " + domain.get_datadir() + sep + \
              domain.get_name() + "." + domain.format
    sww = get_dataobject(domain)
    sww.store_connectivity()
    sww.store_timestep()

def asc_csiro2sww(bath_dir,
                  elevation_dir,
                  ucur_dir,
                  vcur_dir,
                  sww_file,
                  minlat=None, maxlat=None,
                  minlon=None, maxlon=None,
                  zscale=1,
                  mean_stage=0,
                  fail_on_NaN=True,
                  elevation_NaN_filler=0,
                  bath_prefix='ba',
                  elevation_prefix='el',
                  verbose=False):
    """
    Produce an sww boundary file, from esri ascii data from CSIRO.

    Also convert latitude and longitude to UTM. All coordinates are
    assumed to be given in the GDA94 datum.

    Assume:
    All files are in esri ascii format

    4 types of information
    bathymetry
    elevation
    u velocity
    v velocity

    Assumptions
    The metadata of all the files is the same
    Each type is in a separate directory
    One bath file with extension .000
    The time period is less than 24hrs and uniform.
    """
    from Scientific.IO.NetCDF import NetCDFFile

    from anuga.coordinate_transforms.redfearn import redfearn

    precision = Float # So if we want to change the precision it's done here

    # Go in to the bath dir and load the only file
    bath_files = os.listdir(bath_dir)

    bath_file = bath_files[0]
    bath_dir_file = bath_dir + os.sep + bath_file
    bath_metadata, bath_grid = _read_asc(bath_dir_file)

    #Use the date.time of the bath file as a basis for
    #the start time for other files
    base_start = bath_file[-12:]

    #Go into the elevation dir and load the 000 file
    elevation_dir_file = elevation_dir + os.sep + elevation_prefix \
                         + base_start

    elevation_files = os.listdir(elevation_dir)
    ucur_files = os.listdir(ucur_dir)
    vcur_files = os.listdir(vcur_dir)
    elevation_files.sort()
    # The first elevation file should be the
    # file with the same base name as the bath data
    assert elevation_files[0] == 'el' + base_start

    number_of_latitudes = bath_grid.shape[0]
    number_of_longitudes = bath_grid.shape[1]
    number_of_volumes = (number_of_latitudes-1)*(number_of_longitudes-1)*2

    longitudes = [bath_metadata['xllcorner'] + x*bath_metadata['cellsize'] \
                  for x in range(number_of_longitudes)]
    latitudes = [bath_metadata['yllcorner'] + y*bath_metadata['cellsize'] \
                 for y in range(number_of_latitudes)]

    # Reverse order of lat, so the first lat represents the first grid row
    latitudes.reverse()

    kmin, kmax, lmin, lmax = _get_min_max_indexes(latitudes[:], longitudes[:],
                                                  minlat=minlat, maxlat=maxlat,
                                                  minlon=minlon, maxlon=maxlon)

    bath_grid = bath_grid[kmin:kmax,lmin:lmax]
    latitudes = latitudes[kmin:kmax]
    longitudes = longitudes[lmin:lmax]
    number_of_latitudes = len(latitudes)
    number_of_longitudes = len(longitudes)
    number_of_times = len(os.listdir(elevation_dir))
    number_of_points = number_of_latitudes*number_of_longitudes
    number_of_volumes = (number_of_latitudes-1)*(number_of_longitudes-1)*2

    #Work out the times
    if len(elevation_files) > 1:
        # Assume: The time period is less than 24hrs.
        time_period = (int(elevation_files[1][-3:]) - \
                       int(elevation_files[0][-3:]))*60*60
        times = [x*time_period for x in range(len(elevation_files))]
    else:
        times = [0.0]

    if verbose:
        print '------------------------------------------------'
        print 'Statistics:'
        print '  Extent (lat/lon):'
        print '    lat in [%f, %f], len(lat) == %d'\
              %(min(latitudes), max(latitudes),
                len(latitudes))
        print '    lon in [%f, %f], len(lon) == %d'\
              %(min(longitudes), max(longitudes),
                len(longitudes))
        print '    t in [%f, %f], len(t) == %d'\
              %(min(times), max(times), len(times))

    ######### WRITE THE SWW FILE #############
    # NetCDF file definition
    outfile = NetCDFFile(sww_file, 'w')

    #Create new file
    outfile.institution = 'Geoscience Australia'
    outfile.description = 'Converted from XXX'

    #For sww compatibility
    outfile.smoothing = 'Yes'
    outfile.order = 1

    #Start time in seconds since the epoch (midnight 1/1/1970)
    outfile.starttime = starttime = times[0]

    # dimension definitions
    outfile.createDimension('number_of_volumes', number_of_volumes)

    outfile.createDimension('number_of_vertices', 3)
    outfile.createDimension('number_of_points', number_of_points)
    outfile.createDimension('number_of_timesteps', number_of_times)

    # variable definitions
    outfile.createVariable('x', precision, ('number_of_points',))
    outfile.createVariable('y', precision, ('number_of_points',))
    outfile.createVariable('elevation', precision, ('number_of_points',))

    #FIXME: Backwards compatibility
    outfile.createVariable('z', precision, ('number_of_points',))
    #################################

    outfile.createVariable('volumes', Int, ('number_of_volumes',
                                            'number_of_vertices'))

    outfile.createVariable('time', precision,
                           ('number_of_timesteps',))

    outfile.createVariable('stage', precision,
                           ('number_of_timesteps',
                            'number_of_points'))

    outfile.createVariable('xmomentum', precision,
                           ('number_of_timesteps',
                            'number_of_points'))

    outfile.createVariable('ymomentum', precision,
                           ('number_of_timesteps',
                            'number_of_points'))

    #Store
    from anuga.coordinate_transforms.redfearn import redfearn
    x = zeros(number_of_points, Float)  #Easting
    y = zeros(number_of_points, Float)  #Northing

    if verbose: print 'Making triangular grid'
    #Get zone of 1st point.
    refzone, _, _ = redfearn(latitudes[0], longitudes[0])

    vertices = {}
    i = 0
    for k, lat in enumerate(latitudes):
        for l, lon in enumerate(longitudes):

            vertices[l,k] = i

            zone, easting, northing = redfearn(lat, lon)

            msg = 'Zone boundary crossed at longitude = %s' %lon
            #assert zone == refzone, msg
            x[i] = easting
            y[i] = northing
            i += 1

    #Construct 2 triangles per 'rectangular' element
    volumes = []
    for l in range(number_of_longitudes-1):    #X direction
        for k in range(number_of_latitudes-1): #Y direction
            v1 = vertices[l,k+1]
            v2 = vertices[l,k]
            v3 = vertices[l+1,k+1]
            v4 = vertices[l+1,k]

            #Note, this is different to the ferret2sww code
            #since the order of the lats is reversed.
            volumes.append([v1,v3,v2]) #Upper element
            volumes.append([v4,v2,v3]) #Lower element

    volumes = array(volumes)

    geo_ref = Geo_reference(refzone, min(x), min(y))
    geo_ref.write_NetCDF(outfile)

    # This would put the geo ref in the middle:
    #geo_ref = Geo_reference(refzone, (max(x)+min(x))/2.0, (max(y)+min(y))/2.)

    if verbose:
        print '------------------------------------------------'
        print 'More Statistics:'
        print '  Extent (UTM):'
        print '    x in [%f, %f], len(x) == %d'\
              %(min(x), max(x),
                len(x))
        print '    y in [%f, %f], len(y) == %d'\
              %(min(y), max(y),
                len(y))
        print 'geo_ref: ', geo_ref

    z = resize(bath_grid, outfile.variables['z'][:].shape)
    outfile.variables['x'][:] = x - geo_ref.get_xllcorner()
    outfile.variables['y'][:] = y - geo_ref.get_yllcorner()
    outfile.variables['z'][:] = z
    outfile.variables['elevation'][:] = z  #FIXME HACK
    outfile.variables['volumes'][:] = volumes.astype(Int32) #On Opteron 64

    stage = outfile.variables['stage']
    xmomentum = outfile.variables['xmomentum']
    ymomentum = outfile.variables['ymomentum']

    outfile.variables['time'][:] = times   #Store time relative

    if verbose: print 'Converting quantities'
    n = number_of_times
    for j in range(number_of_times):
        # Load in files
        elevation_meta, elevation_grid = \
            _read_asc(elevation_dir + os.sep + elevation_files[j])

        _, u_momentum_grid = _read_asc(ucur_dir + os.sep + ucur_files[j])
        _, v_momentum_grid = _read_asc(vcur_dir + os.sep + vcur_files[j])

        #Cut matrix to desired size
        elevation_grid = elevation_grid[kmin:kmax,lmin:lmax]
        u_momentum_grid = u_momentum_grid[kmin:kmax,lmin:lmax]
        v_momentum_grid = v_momentum_grid[kmin:kmax,lmin:lmax]

        # Handle missing values
        missing = (elevation_grid == elevation_meta['NODATA_value'])
        if sometrue(missing):
            if fail_on_NaN:
                msg = 'File %s contains missing values'\
                      %(elevation_files[j])
                raise DataMissingValuesError, msg
            else:
                elevation_grid = elevation_grid*(missing==0) + \
                                 missing*elevation_NaN_filler

        if verbose and j%((n+10)/10)==0: print '  Doing %d of %d' %(j, n)
        i = 0
        for k in range(number_of_latitudes):      #Y direction
            for l in range(number_of_longitudes): #X direction
                w = zscale*elevation_grid[k,l] + mean_stage
                stage[j,i] = w
                h = w - z[i]
                xmomentum[j,i] = u_momentum_grid[k,l]*h
                ymomentum[j,i] = v_momentum_grid[k,l]*h
                i += 1
    outfile.close()

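# Directory-based usage sketch (paths are illustrative; each directory is
# assumed to hold one kind of esri ascii grid, with bath/elevation file
# names carrying the 'ba'/'el' prefixes and a shared date-time suffix):
#
#     asc_csiro2sww('bath', 'el', 'ucur', 'vcur', 'csiro_boundary.sww',
#                   minlat=-35.5, maxlat=-34.5, minlon=150.0, maxlon=151.0,
#                   mean_stage=0.0, verbose=True)
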
def _get_min_max_indexes(latitudes_ref, longitudes_ref,
                         minlat=None, maxlat=None,
                         minlon=None, maxlon=None):
    """
    Return max, min indexes (for slicing) of the lat and long arrays to
    cover the area specified with min/max lat/long.

    Think of the latitudes and longitudes describing a 2d surface.
    The area returned is, if possible, just big enough to cover the
    input max/min area. (This will not be possible if the max/min area
    has a section outside of the latitudes/longitudes area.)

    Assert longitudes are sorted,
    long - from low to high (west to east, eg 148 - 151)
    Assert latitudes are sorted, ascending or descending
    """
    latitudes = latitudes_ref[:]
    longitudes = longitudes_ref[:]

    latitudes = ensure_numeric(latitudes)
    longitudes = ensure_numeric(longitudes)

    assert allclose(sort(longitudes), longitudes)

    lat_ascending = True
    if not allclose(sort(latitudes), latitudes):
        lat_ascending = False
        # Reverse order of lat, so it's in ascending order
        latitudes = latitudes[::-1]
        assert allclose(sort(latitudes), latitudes)

    largest_lat_index = len(latitudes)-1

    #Cut out a smaller extent.
    if minlat == None:
        lat_min_index = 0
    else:
        lat_min_index = searchsorted(latitudes, minlat)-1
        if lat_min_index < 0:
            lat_min_index = 0

    if maxlat == None:
        lat_max_index = largest_lat_index #len(latitudes)
    else:
        lat_max_index = searchsorted(latitudes, maxlat)
        if lat_max_index > largest_lat_index:
            lat_max_index = largest_lat_index

    if minlon == None:
        lon_min_index = 0
    else:
        lon_min_index = searchsorted(longitudes, minlon)-1
        if lon_min_index < 0:
            lon_min_index = 0

    if maxlon == None:
        lon_max_index = len(longitudes)
    else:
        lon_max_index = searchsorted(longitudes, maxlon)

    # Reverse the indexes if the lat array is descending
    if lat_ascending is False:
        lat_min_index, lat_max_index = largest_lat_index - lat_max_index, \
                                       largest_lat_index - lat_min_index
    lat_max_index = lat_max_index + 1 # taking into account how slicing works
    lon_max_index = lon_max_index + 1 # taking into account how slicing works

    return lat_min_index, lat_max_index, lon_min_index, lon_max_index

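# Worked example (illustrative numbers): with ascending latitudes
# [-36, -35, -34, -33] and longitudes [150, 151, 152], asking for
# minlat=-35.5, maxlat=-34.5, minlon=150.5, maxlon=151.5 gives
#
#     kmin, kmax, lmin, lmax = _get_min_max_indexes(
#         [-36, -35, -34, -33], [150, 151, 152],
#         minlat=-35.5, maxlat=-34.5, minlon=150.5, maxlon=151.5)
#
# i.e. (0, 3, 0, 3): slices just wide enough to enclose the requested box.
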
def _read_asc(filename, verbose=False):
    """Read esri file from the following ASCII format (.asc)

    Example:

    ncols         3121
    nrows         1800
    xllcorner     722000
    yllcorner     5893000
    cellsize      25
    NODATA_value  -9999
    138.3698 137.4194 136.5062 135.5558 ..........

    """

    datafile = open(filename)

    if verbose: print 'Reading DEM from %s' %(filename)
    lines = datafile.readlines()
    datafile.close()

    if verbose: print 'Got', len(lines), ' lines'

    ncols = int(lines.pop(0).split()[1].strip())
    nrows = int(lines.pop(0).split()[1].strip())
    xllcorner = float(lines.pop(0).split()[1].strip())
    yllcorner = float(lines.pop(0).split()[1].strip())
    cellsize = float(lines.pop(0).split()[1].strip())
    NODATA_value = float(lines.pop(0).split()[1].strip())

    assert len(lines) == nrows

    #Store data
    grid = []

    n = len(lines)
    for i, line in enumerate(lines):
        cells = line.split()
        assert len(cells) == ncols
        grid.append(array([float(x) for x in cells]))
    grid = array(grid)

    return {'xllcorner':xllcorner,
            'yllcorner':yllcorner,
            'cellsize':cellsize,
            'NODATA_value':NODATA_value}, grid

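# Usage sketch ('grid.asc' is a placeholder for any esri ascii grid):
#
#     metadata, grid = _read_asc('grid.asc')
#     # metadata['cellsize'] -> e.g. 25.0; grid.shape -> (nrows, ncols)
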
#### URS 2 SWW ###

lon_name = 'LON'
lat_name = 'LAT'
time_name = 'TIME'
precision = Float # So if we want to change the precision it's done here

class Write_nc:
    """
    Write an nc file.

    Note, this should be checked to meet cdc netcdf conventions for gridded
    data. http://www.cdc.noaa.gov/cdc/conventions/cdc_netcdf_standard.shtml
    """
    def __init__(self,
                 quantity_name,
                 file_name,
                 time_step_count,
                 time_step,
                 lon,
                 lat):
        """
        time_step_count is the number of time steps.
        time_step is the time step size

        pre-condition: quantity_name must be 'HA', 'UA' or 'VA'.
        """
        self.quantity_name = quantity_name
        quantity_units = {'HA':'CENTIMETERS',
                          'UA':'CENTIMETERS/SECOND',
                          'VA':'CENTIMETERS/SECOND'}

        multiplier_dic = {'HA':100.0,   # To convert from m to cm
                          'UA':100.0,   # m/s to cm/sec
                          'VA':-100.0}  # MUX files have positive x in the
                                        # Southern direction. This corrects
                                        # for it, when writing nc files.

        self.quantity_multiplier = multiplier_dic[self.quantity_name]

        #self.file_name = file_name
        self.time_step_count = time_step_count
        self.time_step = time_step

        # NetCDF file definition
        self.outfile = NetCDFFile(file_name, 'w')
        outfile = self.outfile

        #Create new file
        nc_lon_lat_header(outfile, lon, lat)

        # TIME
        outfile.createDimension(time_name, None)
        outfile.createVariable(time_name, precision, (time_name,))

        #QUANTITY
        outfile.createVariable(self.quantity_name, precision,
                               (time_name, lat_name, lon_name))
        outfile.variables[self.quantity_name].missing_value = -1.e+034
        outfile.variables[self.quantity_name].units = \
            quantity_units[self.quantity_name]
        outfile.variables[lon_name][:] = ensure_numeric(lon)
        outfile.variables[lat_name][:] = ensure_numeric(lat)

        #Assume no one will be wanting to read this, while we are writing
        #outfile.close()

    def store_timestep(self, quantity_slice):
        """
        Write a time slice of quantity info.
        quantity_slice is the data to be stored at this time step
        """

        outfile = self.outfile

        # Get the variables
        time = outfile.variables[time_name]
        quantity = outfile.variables[self.quantity_name]

        i = len(time)

        #Store time
        time[i] = i*self.time_step #self.domain.time
        quantity[i,:] = quantity_slice * self.quantity_multiplier

    def close(self):
        self.outfile.close()

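# A minimal sketch of driving Write_nc directly (values illustrative; a
# 2x2 degree grid with two 0.5 s time steps of wave amplitude in metres):
#
#     writer = Write_nc('HA', 'demo_ha.nc', time_step_count=2,
#                       time_step=0.5, lon=[150.0, 151.0], lat=[-35.0, -34.0])
#     for slice in amplitude_slices:   # each slice shaped (len(lat), len(lon))
#         writer.store_timestep(slice) # stored in cm via the HA multiplier
#     writer.close()
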
def urs2sww(basename_in='o', basename_out=None, verbose=False,
            remove_nc_files=True,
            minlat=None, maxlat=None,
            minlon=None, maxlon=None,
            mint=None, maxt=None,
            mean_stage=0,
            origin=None,
            zscale=1,
            fail_on_NaN=True,
            NaN_filler=0,
            elevation=None):
    """
    Convert URS C binary format for wave propagation to
    sww format native to abstract_2d_finite_volumes.

    Specify only basename_in and read files of the form
    basefilename_waveheight-z-mux, basefilename_velocity-e-mux and
    basefilename_velocity-n-mux containing relative height,
    x-velocity and y-velocity, respectively.

    Also convert latitude and longitude to UTM. All coordinates are
    assumed to be given in the GDA94 datum. The latitude and longitude
    information is for a grid.

    min's and max's: If omitted - full extent is used.
    To include a value min may equal it, while max must exceed it.
    Lat and lon are assumed to be in decimal degrees.
    NOTE: minlon is the most east boundary.

    origin is a 3-tuple with geo referenced
    UTM coordinates (zone, easting, northing).
    It will be the origin of the sww file. This shouldn't be used,
    since all of anuga should be able to handle an arbitrary origin.

    URS C binary format has data organised as TIME, LONGITUDE, LATITUDE
    which means that latitude is the fastest
    varying dimension (row major order, so to speak).

    In URS C binary the latitudes and longitudes are in ascending order.
    """
    if basename_out == None:
        basename_out = basename_in
    files_out = urs2nc(basename_in, basename_out)
    ferret2sww(basename_out,
               minlat=minlat,
               maxlat=maxlat,
               minlon=minlon,
               maxlon=maxlon,
               mint=mint,
               maxt=maxt,
               mean_stage=mean_stage,
               origin=origin,
               zscale=zscale,
               fail_on_NaN=fail_on_NaN,
               NaN_filler=NaN_filler,
               inverted_bathymetry=True,
               verbose=verbose)
    if remove_nc_files:
        for file_out in files_out:
            os.remove(file_out)

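# End-to-end sketch (mux basename is illustrative): convert the three URS
# mux files for basename 'tsu' into an sww file, keeping the intermediate
# nc files for inspection:
#
#     urs2sww('tsu',
#             minlat=-35.5, maxlat=-34.5, minlon=150.0, maxlon=151.0,
#             remove_nc_files=False, verbose=True)
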
def urs2nc(basename_in='o', basename_out='urs'):
    """
    Convert the 3 urs files to 4 nc files.

    The urs file names must be:
    [basename_in]_waveheight-z-mux
    [basename_in]_velocity-e-mux
    [basename_in]_velocity-n-mux
    """

    files_in = [basename_in + WAVEHEIGHT_MUX_LABEL,
                basename_in + EAST_VELOCITY_LABEL,
                basename_in + NORTH_VELOCITY_LABEL]
    files_out = [basename_out + '_ha.nc',
                 basename_out + '_ua.nc',
                 basename_out + '_va.nc']
    quantities = ['HA', 'UA', 'VA']

    #if os.access(files_in[0]+'.mux', os.F_OK) == 0:
    for i, file_name in enumerate(files_in):
        if os.access(file_name, os.F_OK) == 0:
            if os.access(file_name + '.mux', os.F_OK) == 0:
                msg = 'File %s does not exist or is not accessible' %file_name
                raise IOError, msg
            else:
                files_in[i] += '.mux'
                print "file_name", file_name

    hashed_elevation = None
    for file_in, file_out, quantity in map(None, files_in,
                                           files_out,
                                           quantities):
        lonlatdep, lon, lat, depth = _binary_c2nc(file_in,
                                                  file_out,
                                                  quantity)
        if hashed_elevation == None:
            elevation_file = basename_out + '_e.nc'
            write_elevation_nc(elevation_file,
                               lon,
                               lat,
                               depth)
            hashed_elevation = myhash(lonlatdep)
        else:
            msg = "The elevation information in the mux files is inconsistent"
            assert hashed_elevation == myhash(lonlatdep), msg
    files_out.append(elevation_file)
    return files_out

def _binary_c2nc(file_in, file_out, quantity):
    """
    Reads in a quantity urs file and writes a quantity nc file.
    Additionally, returns the depth and lat, long info,
    so it can be written to a file.
    """
    columns = 3 # long, lat, depth
    mux_file = open(file_in, 'rb')

    # Number of points/stations
    (points_num,) = unpack('i', mux_file.read(4))

    # nt, int - Number of time steps
    (time_step_count,) = unpack('i', mux_file.read(4))

    #dt, float - time step, seconds
    (time_step,) = unpack('f', mux_file.read(4))

    msg = "Bad data in the mux file."
    if points_num < 0:
        mux_file.close()
        raise ANUGAError, msg
    if time_step_count < 0:
        mux_file.close()
        raise ANUGAError, msg
    if time_step < 0:
        mux_file.close()
        raise ANUGAError, msg

    lonlatdep = p_array.array('f')
    lonlatdep.read(mux_file, columns * points_num)
    lonlatdep = array(lonlatdep, typecode=Float)
    lonlatdep = reshape(lonlatdep, (points_num, columns))

    lon, lat, depth = lon_lat2grid(lonlatdep)
    lon_sorted = list(lon)
    lon_sorted.sort()

    if not lon == lon_sorted:
        msg = "Longitudes in mux file are not in ascending order"
        raise IOError, msg
    lat_sorted = list(lat)
    lat_sorted.sort()

    if not lat == lat_sorted:
        msg = "Latitudes in mux file are not in ascending order"
        raise IOError, msg

    nc_file = Write_nc(quantity,
                       file_out,
                       time_step_count,
                       time_step,
                       lon,
                       lat)

    for i in range(time_step_count):
        #Read in a time slice from mux file
        hz_p_array = p_array.array('f')
        hz_p_array.read(mux_file, points_num)
        hz_p = array(hz_p_array, typecode=Float)
        hz_p = reshape(hz_p, (len(lon), len(lat)))
        hz_p = transpose(hz_p) #mux has lat varying fastest, nc has long v.f.

        #Write time slice to nc file
        nc_file.store_timestep(hz_p)

    mux_file.close()
    nc_file.close()

    return lonlatdep, lon, lat, depth

def write_elevation_nc(file_out, lon, lat, depth_vector):
    """
    Write an nc elevation file.
    """

    # NetCDF file definition
    outfile = NetCDFFile(file_out, 'w')

    #Create new file
    nc_lon_lat_header(outfile, lon, lat)

    # ELEVATION
    zname = 'ELEVATION'
    outfile.createVariable(zname, precision, (lat_name, lon_name))
    outfile.variables[zname].units = 'CENTIMETERS'
    outfile.variables[zname].missing_value = -1.e+034

    outfile.variables[lon_name][:] = ensure_numeric(lon)
    outfile.variables[lat_name][:] = ensure_numeric(lat)

    depth = reshape(depth_vector, (len(lat), len(lon)))
    outfile.variables[zname][:] = depth

    outfile.close()

def nc_lon_lat_header(outfile, lon, lat):
    """
    outfile is the netcdf file handle.
    lon - a list/array of the longitudes
    lat - a list/array of the latitudes
    """

    outfile.institution = 'Geoscience Australia'
    outfile.description = 'Converted from URS binary C'

    # Longitude
    outfile.createDimension(lon_name, len(lon))
    outfile.createVariable(lon_name, precision, (lon_name,))
    outfile.variables[lon_name].point_spacing = 'uneven'
    outfile.variables[lon_name].units = 'degrees_east'
    outfile.variables[lon_name].assignValue(lon)

    # Latitude
    outfile.createDimension(lat_name, len(lat))
    outfile.createVariable(lat_name, precision, (lat_name,))
    outfile.variables[lat_name].point_spacing = 'uneven'
    outfile.variables[lat_name].units = 'degrees_north'
    outfile.variables[lat_name].assignValue(lat)


def lon_lat2grid(long_lat_dep):
    """
    Given a list of points that are assumed to lie on a grid,
    return the longs and lats of the grid.
    long_lat_dep is an array where each row is a position.
    The first column is longitudes.
    The second column is latitudes.

    The latitude is the fastest varying dimension - as in mux files.
    """
    LONG = 0
    LAT = 1
    QUANTITY = 2

    long_lat_dep = ensure_numeric(long_lat_dep, Float)

    num_points = long_lat_dep.shape[0]
    this_rows_long = long_lat_dep[0,LONG]

    # Count the length of unique latitudes.
    # (Check the index bound before indexing, so a single-longitude
    # data set does not cause an IndexError.)
    i = 0
    while i < num_points and long_lat_dep[i,LONG] == this_rows_long:
        i += 1

    # Determine the lats and longs from the grid
    lat = long_lat_dep[:i, LAT]
    long = long_lat_dep[::i, LONG]

    lenlong = len(long)
    lenlat = len(lat)

    msg = 'Input data is not gridded'
    assert num_points % lenlat == 0, msg
    assert num_points % lenlong == 0, msg

    # Test that data is gridded
    for i in range(lenlong):
        msg = 'Data is not gridded.  It must be for this operation'
        first = i*lenlat
        last = first + lenlat

        assert allclose(long_lat_dep[first:last,LAT], lat), msg
        assert allclose(long_lat_dep[first:last,LONG], long[i]), msg

    msg = 'Out of range latitudes/longitudes'
    for l in lat: assert -90 < l < 90, msg
    for l in long: assert -180 < l < 180, msg

    # Change the quantity from lat being the fastest varying dimension to
    # long being the fastest varying dimension.
    # FIXME - make this faster/do this a better way
    # use numeric transpose, after reshaping the quantity vector
    quantity = zeros(num_points, Float)

    for lat_i, _ in enumerate(lat):
        for long_i, _ in enumerate(long):
            q_index = lat_i*lenlong + long_i
            lld_index = long_i*lenlat + lat_i
            quantity[q_index] = long_lat_dep[lld_index, QUANTITY]

    return long, lat, quantity
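
# Illustrative sketch (not part of the AnuGA API): the reordering done by
# lon_lat2grid for a tiny 2-longitude x 3-latitude grid stored with
# latitude as the fastest varying dimension.  All values are invented.
def _example_lon_lat2grid():
    points = [[150.0, -35.0, 1.0],   # long, lat, quantity
              [150.0, -34.0, 2.0],
              [150.0, -33.0, 3.0],
              [151.0, -35.0, 4.0],
              [151.0, -34.0, 5.0],
              [151.0, -33.0, 6.0]]
    long, lat, quantity = lon_lat2grid(points)
    # long     == [150.0, 151.0]
    # lat      == [-35.0, -34.0, -33.0]
    # quantity == [1.0, 4.0, 2.0, 5.0, 3.0, 6.0] - long now fastest varying
    print long, lat, quantity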

#### END URS 2 SWW ###

#### URS UNGRIDDED 2 SWW ###

### PRODUCING THE POINTS NEEDED FILE ###

# Values used for the FESA 2007 results
#LL_LAT = -50.0
#LL_LONG = 80.0
#GRID_SPACING = 1.0/60.0
#LAT_AMOUNT = 4800
#LONG_AMOUNT = 3600

def URS_points_needed_to_file(file_name, boundary_polygon, zone,
                              ll_lat, ll_long,
                              grid_spacing,
                              lat_amount, long_amount,
                              isSouthernHemisphere=True,
                              export_csv=False, use_cache=False,
                              verbose=False):
    """
    Given the info to replicate the URS grid and a polygon, output
    a file that specifies the cloud of boundary points for URS.

    Note: The polygon cannot cross zones or hemispheres.

    file_name - name of the urs file produced for David.
    boundary_polygon - a list of points that describes a polygon.
                       The last point is assumed to join the first point.
                       This is in UTM (lat long would be better though)

    This is info about the URS model that needs to be inputted.

    ll_lat - lower left latitude, in decimal degrees
    ll_long - lower left longitude, in decimal degrees
    grid_spacing - in decimal degrees
    lat_amount - number of latitudes
    long_amount - number of longitudes

    Don't add the file extension.  It will be added.
    """
    geo = URS_points_needed(boundary_polygon, zone, ll_lat, ll_long,
                            grid_spacing,
                            lat_amount, long_amount, isSouthernHemisphere,
                            use_cache, verbose)
    if not file_name[-4:] == ".urs":
        file_name += ".urs"
    geo.export_points_file(file_name, isSouthHemisphere=isSouthernHemisphere)
    if export_csv:
        if file_name[-4:] == ".urs":
            file_name = file_name[:-4] + ".csv"
        geo.export_points_file(file_name)
    return geo
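
# Hedged usage sketch: write 'boundary.urs' (plus a csv copy) for a URS
# grid with one arc-minute spacing.  The polygon, zone and grid values
# below are invented for illustration only.
def _example_URS_points_needed_to_file():
    polygon = [[500000.0, 6100000.0], [510000.0, 6100000.0],
               [510000.0, 6110000.0], [500000.0, 6110000.0]]  # UTM
    URS_points_needed_to_file('boundary', polygon, 44,
                              ll_lat=-50.0, ll_long=80.0,
                              grid_spacing=1.0/60.0,
                              lat_amount=4800, long_amount=3600,
                              export_csv=True)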

def URS_points_needed(boundary_polygon, zone, ll_lat,
                      ll_long, grid_spacing,
                      lat_amount, long_amount, isSouthHemisphere=True,
                      use_cache=False, verbose=False):
    args = (boundary_polygon,
            zone, ll_lat,
            ll_long, grid_spacing,
            lat_amount, long_amount, isSouthHemisphere)
    kwargs = {}
    if use_cache is True:
        try:
            from anuga.caching import cache
        except ImportError:
            msg = 'Caching was requested, but caching module '+\
                  'could not be imported'
            raise Exception, msg

        geo = cache(_URS_points_needed,
                    args, kwargs,
                    verbose=verbose,
                    compression=False)
    else:
        geo = apply(_URS_points_needed, args, kwargs)

    return geo

def _URS_points_needed(boundary_polygon,
                       zone, ll_lat,
                       ll_long, grid_spacing,
                       lat_amount, long_amount,
                       isSouthHemisphere):
    """
    boundary_polygon - a list of points that describes a polygon.
                       The last point is assumed to join the first point.
                       This is in UTM (lat long would be better though)

    ll_lat - lower left latitude, in decimal degrees
    ll_long - lower left longitude, in decimal degrees
    grid_spacing - in decimal degrees
    """

    from sets import ImmutableSet

    msg = "grid_spacing can not be zero"
    assert not grid_spacing == 0, msg
    a = boundary_polygon

    # List of segments.  Each segment is two points.
    segs = [i and [a[i-1], a[i]] or [a[len(a)-1], a[0]] for i in range(len(a))]

    # Convert the segments to lats and longs.
    # Don't assume the zone of the segments is the same as the lower left
    # corner of the lat long data!!  They can easily be in different zones.
    lat_long_set = ImmutableSet()
    for seg in segs:
        points_lat_long = points_needed(seg, ll_lat, ll_long, grid_spacing,
                                        lat_amount, long_amount, zone,
                                        isSouthHemisphere)
        lat_long_set |= ImmutableSet(points_lat_long)
    if lat_long_set == ImmutableSet([]):
        msg = 'URS region specified and polygon does not overlap.'
        raise ValueError, msg

    # Warning: there is no info in geospatial saying the hemisphere of
    # these points.  There should be.
    geo = Geospatial_data(data_points=list(lat_long_set),
                          points_are_lats_longs=True)
    return geo

def points_needed(seg, ll_lat, ll_long, grid_spacing,
                  lat_amount, long_amount, zone,
                  isSouthHemisphere):
    """
    seg is two points, in UTM.
    Return a list of the points, in lats and longs, that are needed to
    interpolate any point on the segment.
    """
    from math import sqrt

    geo_reference = Geo_reference(zone=zone)
    geo = Geospatial_data(seg, geo_reference=geo_reference)
    seg_lat_long = geo.get_data_points(as_lat_long=True,
                                       isSouthHemisphere=isSouthHemisphere)

    # 1.415 = 2^0.5, rounded up....
    sqrt_2_rounded_up = 1.415
    buffer = sqrt_2_rounded_up * grid_spacing

    max_lat = max(seg_lat_long[0][0], seg_lat_long[1][0]) + buffer
    max_long = max(seg_lat_long[0][1], seg_lat_long[1][1]) + buffer
    min_lat = min(seg_lat_long[0][0], seg_lat_long[1][0]) - buffer
    min_long = min(seg_lat_long[0][1], seg_lat_long[1][1]) - buffer

    first_row = (min_long - ll_long)/grid_spacing
    # To round up
    first_row_long = int(round(first_row + 0.5))

    last_row = (max_long - ll_long)/grid_spacing # round down
    last_row_long = int(round(last_row))

    first_row = (min_lat - ll_lat)/grid_spacing
    # To round up
    first_row_lat = int(round(first_row + 0.5))

    last_row = (max_lat - ll_lat)/grid_spacing # round down
    last_row_lat = int(round(last_row))

    # Work out the max distance.
    # 111120 m is the distance spanned by 1 degree of latitude.
    # max_distance = sqrt(2) * 111120 * grid_spacing
    max_distance = 157147.4112 * grid_spacing

    points_lat_long = []
    # Create a list of the lat long points to include.
    for index_lat in range(first_row_lat, last_row_lat + 1):
        for index_long in range(first_row_long, last_row_long + 1):
            lat = ll_lat + index_lat*grid_spacing
            long = ll_long + index_long*grid_spacing

            # Filter here to keep good points
            if keep_point(lat, long, seg, max_distance):
                points_lat_long.append((lat, long)) # Must be hashable
    return points_lat_long
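
# Hedged worked example for the index window above (values invented):
# with ll_long = 80.0 and grid_spacing = 1.0/60.0, a segment whose
# buffered longitude extent is [80.905, 81.095] selects columns
#   first_row_long = int(round((80.905 - 80.0)*60 + 0.5)) = int(round(54.8)) = 55
#   last_row_long  = int(round((81.095 - 80.0)*60))       = int(round(65.7)) = 66
# so grid columns 55..66 become candidates before keep_point filtering.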

def keep_point(lat, long, seg, max_distance):
    """
    seg is two points, in UTM.
    Return True if the perpendicular distance from (lat, long) to the
    line through seg is at most max_distance (metres).
    """
    from math import sqrt

    _, x0, y0 = redfearn(lat, long)
    x1 = seg[0][0]
    y1 = seg[0][1]
    x2 = seg[1][0]
    y2 = seg[1][1]
    x2_1 = x2 - x1
    y2_1 = y2 - y1
    try:
        d = abs((x2_1)*(y1-y0) - (x1-x0)*(y2_1))/sqrt( \
            (x2_1)*(x2_1) + (y2_1)*(y2_1))
    except ZeroDivisionError:
        # The segment has zero length; keep the point only if it
        # coincides with the degenerate segment.
        if sqrt((x2_1)*(x2_1) + (y2_1)*(y2_1)) == 0 and \
           abs((x2_1)*(y1-y0) - (x1-x0)*(y2_1)) == 0:
            return True
        else:
            return False

    if d <= max_distance:
        return True
    else:
        return False
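
# Illustrative check of the distance formula used in keep_point (not part
# of the AnuGA API): the distance from (x0, y0) = (0, 1) to the line
# through (0, 0) and (2, 0) is |2*(0-1) - 0| / sqrt(4) = 1.
def _example_point_to_line_distance():
    from math import sqrt
    x0, y0 = 0.0, 1.0
    x1, y1 = 0.0, 0.0
    x2, y2 = 2.0, 0.0
    d = abs((x2-x1)*(y1-y0) - (x1-x0)*(y2-y1))/sqrt((x2-x1)**2 + (y2-y1)**2)
    assert d == 1.0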

#### CONVERTING UNGRIDDED URS DATA TO AN SWW FILE ####

WAVEHEIGHT_MUX_LABEL = '_waveheight-z-mux'
EAST_VELOCITY_LABEL = '_velocity-e-mux'
NORTH_VELOCITY_LABEL = '_velocity-n-mux'

def urs_ungridded2sww(basename_in='o', basename_out=None, verbose=False,
                      mint=None, maxt=None,
                      mean_stage=0,
                      origin=None,
                      hole_points_UTM=None,
                      zscale=1):
    """
    Convert URS C binary format for wave propagation to
    sww format native to abstract_2d_finite_volumes.

    Specify only basename_in and read files of the form
    basefilename_waveheight-z-mux, basefilename_velocity-e-mux and
    basefilename_velocity-n-mux containing relative height,
    x-velocity and y-velocity, respectively.

    Also convert latitude and longitude to UTM. All coordinates are
    assumed to be given in the GDA94 datum. The latitude and longitude
    information is assumed to be ungridded.

    min's and max's: If omitted - the full extent is used.
    To include a value, min and max may equal it.
    Lat and lon are assumed to be in decimal degrees.

    origin is a 3-tuple with geo referenced
    UTM coordinates (zone, easting, northing).
    It will be the origin of the sww file. This shouldn't be used,
    since all of anuga should be able to handle an arbitrary origin.
    The mux point info is NOT relative to this origin.

    URS C binary format has data organised as TIME, LONGITUDE, LATITUDE,
    which means that latitude is the fastest
    varying dimension (row major order, so to speak).

    In URS C binary the latitudes and longitudes are in ascending order.

    Note, interpolations of the resulting sww file will be different
    from results of urs2sww.  This is due to the interpolation
    function used, and the different grid structure between urs2sww
    and this function.

    Interpolating data that has an underlying gridded source can
    easily end up with different values, depending on the underlying
    mesh.

    Consider these 4 points
    50   -50

    0     0

    The grid can be
     -
    |\|    A
     -
    or;
     -
    |/|    B
     -

    If a point is just below the centre of the midpoint, it will have a
    +ve value in grid A and a -ve value in grid B.
    """
    from anuga.mesh_engine.mesh_engine import NoTrianglesError
    from anuga.pmesh.mesh import Mesh

    files_in = [basename_in + WAVEHEIGHT_MUX_LABEL,
                basename_in + EAST_VELOCITY_LABEL,
                basename_in + NORTH_VELOCITY_LABEL]
    quantities = ['HA','UA','VA']

    # Instantiate Urs_points for each of the three mux files.
    mux = {}
    for quantity, file in map(None, quantities, files_in):
        mux[quantity] = Urs_points(file)

    # Could check that the depth is the same. (hashing)

    # Handle to a mux file to do depth stuff
    a_mux = mux[quantities[0]]

    # Convert to utm
    lat = a_mux.lonlatdep[:,1]
    long = a_mux.lonlatdep[:,0]
    points_utm, zone = convert_from_latlon_to_utm( \
        latitudes=lat, longitudes=long)

    elevation = a_mux.lonlatdep[:,2] * -1

    # Grid (create a mesh from the selected points).
    # This mesh has a problem: triangles are stretched over ungridded
    # areas.  If these areas could be described as holes in pmesh, that
    # would be great.

    # I can't just get the user to select a point in the middle.
    # A boundary is needed around these points.
    # But if the zone of points is obvious enough, auto-segment should do
    # a good boundary.
    mesh = Mesh()
    mesh.add_vertices(points_utm)
    mesh.auto_segment(smooth_indents=True, expand_pinch=True)

    # To try and avoid alpha shape 'hugging' too much
    mesh.auto_segment(mesh.shape.get_alpha()*1.1)
    if hole_points_UTM is not None:
        point = ensure_absolute(hole_points_UTM)
        mesh.add_hole(point[0], point[1])
    try:
        mesh.generate_mesh(minimum_triangle_angle=0.0, verbose=False)
    except NoTrianglesError:
        # This is a bit of a hack, going in and changing the
        # data structure.
        mesh.holes = []
        mesh.generate_mesh(minimum_triangle_angle=0.0, verbose=False)
    mesh_dic = mesh.Mesh2MeshList()

    # These are the times of the mux file
    mux_times = []
    for i in range(a_mux.time_step_count):
        mux_times.append(a_mux.time_step * i)
    mux_times_start_i, mux_times_fin_i = mux2sww_time(mux_times, mint, maxt)
    times = mux_times[mux_times_start_i:mux_times_fin_i]

    if mux_times_start_i == mux_times_fin_i:
        # Close the mux files
        for quantity, file in map(None, quantities, files_in):
            mux[quantity].close()
        msg = "Due to mint and maxt there's no time info in the boundary SWW."
        raise Exception, msg

    # If this raise is removed there are currently no downstream errors

    points_utm = ensure_numeric(points_utm)
    assert ensure_numeric(mesh_dic['generatedpointlist']) == \
           ensure_numeric(points_utm)

    volumes = mesh_dic['generatedtrianglelist']

    # Write sww intro and grid stuff.
    if basename_out is None:
        swwname = basename_in + '.sww'
    else:
        swwname = basename_out + '.sww'

    if verbose: print 'Output to ', swwname
    outfile = NetCDFFile(swwname, 'w')

    # For a different way of doing this, check out tsh2sww
    # work out sww_times and the index range this covers
    sww = Write_sww()
    sww.store_header(outfile, times, len(volumes), len(points_utm),
                     verbose=verbose, sww_precision=Float)
    outfile.mean_stage = mean_stage
    outfile.zscale = zscale

    sww.store_triangulation(outfile, points_utm, volumes,
                            elevation, zone, new_origin=origin,
                            verbose=verbose)

    if verbose: print 'Converting quantities'
    j = 0
    # Read in a time slice from each mux file and write it to the sww file
    for ha, ua, va in map(None, mux['HA'], mux['UA'], mux['VA']):
        if j >= mux_times_start_i and j < mux_times_fin_i:
            stage = zscale*ha + mean_stage
            h = stage - elevation
            xmomentum = ua*h
            ymomentum = -1*va*h # -1 since in mux files south is positive.
            sww.store_quantities(outfile,
                                 slice_index=j - mux_times_start_i,
                                 verbose=verbose,
                                 stage=stage,
                                 xmomentum=xmomentum,
                                 ymomentum=ymomentum,
                                 sww_precision=Float)
        j += 1
    if verbose: sww.verbose_quantities(outfile)
    outfile.close()
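
# Hedged usage sketch (file names invented): convert the trio
# project_waveheight-z-mux, project_velocity-e-mux and
# project_velocity-n-mux into project_boundary.sww.
#
#     urs_ungridded2sww(basename_in='project',
#                       basename_out='project_boundary',
#                       mean_stage=0.0, verbose=True)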

def mux2sww_time(mux_times, mint, maxt):
    """
    Return the (start, end) indices into mux_times that cover the
    interval [mint, maxt].  If mint or maxt is None, the corresponding
    end of the range is left open.
    """

    if mint is None:
        mux_times_start_i = 0
    else:
        mux_times_start_i = searchsorted(mux_times, mint)

    if maxt is None:
        mux_times_fin_i = len(mux_times)
    else:
        maxt += 0.5 # So if you specify a time where there is
                    # data, that time will be included
        mux_times_fin_i = searchsorted(mux_times, maxt)

    return mux_times_start_i, mux_times_fin_i
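
# Illustrative check (not part of the AnuGA API): slicing behaviour of
# mux2sww_time for a one-second time step.
def _example_mux2sww_time():
    mux_times = [0.0, 1.0, 2.0, 3.0, 4.0]
    start, fin = mux2sww_time(mux_times, mint=1.0, maxt=3.0)
    # start == 1 and fin == 4, so the selected slice includes both ends
    assert mux_times[start:fin] == [1.0, 2.0, 3.0]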

class Write_sww:
    from anuga.shallow_water.shallow_water_domain import Domain

    # FIXME (Ole): Hardwiring the conserved quantities like
    # this could be a problem. I would prefer taking them from
    # the instantiation of Domain.
    #
    # (DSG) There is not always a Domain instance when Write_sww is used.
    # Check to see if this is the same level of hardwiring as is in
    # shallow water domain.

    sww_quantities = Domain.conserved_quantities

    RANGE = '_range'
    EXTREMA = ':extrema'

    def __init__(self):
        pass

    def store_header(self,
                     outfile,
                     times,
                     number_of_volumes,
                     number_of_points,
                     description='Converted from XXX',
                     smoothing=True,
                     order=1,
                     sww_precision=Float32,
                     verbose=False):
        """
        outfile - the open NetCDF file that will be written to
        times - A list of the time slice times OR a start time
        Note, if a list is given the info will be made relative.
        number_of_volumes - the number of triangles
        """

        outfile.institution = 'Geoscience Australia'
        outfile.description = description

        # For sww compatibility
        if smoothing is True:
            # Smoothing to be deprecated
            outfile.smoothing = 'Yes'
            outfile.vertices_are_stored_uniquely = 'False'
        else:
            # Smoothing to be deprecated
            outfile.smoothing = 'No'
            outfile.vertices_are_stored_uniquely = 'True'
        outfile.order = order

        try:
            revision_number = get_revision_number()
        except:
            revision_number = None
        # Allow None to be stored as a string
        outfile.revision_number = str(revision_number)

        # times - A list or array of the time slice times OR a start time
        # Start time in seconds since the epoch (midnight 1/1/1970)

        # This distinguishes a list/array of time slices from a single
        # start time.
        if type(times) is list or type(times) is ArrayType:
            number_of_times = len(times)
            times = ensure_numeric(times)
            if number_of_times == 0:
                starttime = 0
            else:
                starttime = times[0]
                times = times - starttime  # Store relative times
        else:
            number_of_times = 0
            starttime = times

        outfile.starttime = starttime

        # dimension definitions
        outfile.createDimension('number_of_volumes', number_of_volumes)
        outfile.createDimension('number_of_vertices', 3)
        outfile.createDimension('numbers_in_range', 2)

        if smoothing is True:
            outfile.createDimension('number_of_points', number_of_points)

            # FIXME(Ole): This will cause sww files for parallel domains to
            # have ghost nodes stored (but not used by triangles).
            # To clean this up, we have to change get_vertex_values and
            # friends in quantity.py (but I can't be bothered right now)
        else:
            outfile.createDimension('number_of_points', 3*number_of_volumes)
        outfile.createDimension('number_of_timesteps', number_of_times)

        # variable definitions
        outfile.createVariable('x', sww_precision, ('number_of_points',))
        outfile.createVariable('y', sww_precision, ('number_of_points',))
        outfile.createVariable('elevation', sww_precision,
                               ('number_of_points',))
        q = 'elevation'
        outfile.createVariable(q+Write_sww.RANGE, sww_precision,
                               ('numbers_in_range',))

        # Initialise ranges with small and large sentinels.
        # If this was in pure Python we could have used None sensibly
        outfile.variables[q+Write_sww.RANGE][0] = max_float  # Min
        outfile.variables[q+Write_sww.RANGE][1] = -max_float # Max

        # FIXME: Backwards compatibility
        outfile.createVariable('z', sww_precision, ('number_of_points',))
        #################################

        outfile.createVariable('volumes', Int, ('number_of_volumes',
                                                'number_of_vertices'))
        # Doing sww_precision instead of Float gives cast errors.
        outfile.createVariable('time', Float,
                               ('number_of_timesteps',))

        for q in Write_sww.sww_quantities:
            outfile.createVariable(q, sww_precision,
                                   ('number_of_timesteps',
                                    'number_of_points'))
            outfile.createVariable(q+Write_sww.RANGE, sww_precision,
                                   ('numbers_in_range',))

            # Initialise ranges with small and large sentinels.
            # If this was in pure Python we could have used None sensibly
            outfile.variables[q+Write_sww.RANGE][0] = max_float  # Min
            outfile.variables[q+Write_sww.RANGE][1] = -max_float # Max

        if type(times) is list or type(times) is ArrayType:
            outfile.variables['time'][:] = times  # Store relative times

        if verbose:
            print '------------------------------------------------'
            print 'Statistics:'
            print '    t in [%f, %f], len(t) == %d'\
                  %(min(times.flat), max(times.flat), len(times.flat))

    def store_triangulation(self,
                            outfile,
                            points_utm,
                            volumes,
                            elevation, zone=None, new_origin=None,
                            points_georeference=None, verbose=False):
        """
        new_origin - a georeference that the points can be set to. (Maybe
        do this before calling this function.)

        points_utm - currently a list or array of the points in UTM.
        points_georeference - the georeference of the points_utm

        How about passing new_origin and current_origin.
        If you get both, do a conversion from the old to the new.

        If you only get new_origin, the points are absolute,
        convert to relative.

        If you only get the current_origin, the points are relative; store
        as relative.

        If you get no georefs, create a new georef based on the minimums of
        points_utm.  (Another option would be to default to absolute.)

        Yes, and this is done in another part of the code.
        Probably geospatial.

        If you don't supply either geo_ref, then supply a zone.  If not,
        the default zone will be used.

        precon:
        store_header has been called.
        """

        number_of_points = len(points_utm)
        volumes = array(volumes)
        points_utm = array(points_utm)

        # Given the two geo_refs and the points, do the stuff
        # described in the method header.
        # If this is needed elsewhere, pull it out as a function.
        points_georeference = ensure_geo_reference(points_georeference)
        new_origin = ensure_geo_reference(new_origin)
        if new_origin is None and points_georeference is not None:
            points = points_utm
            geo_ref = points_georeference
        else:
            if new_origin is None:
                new_origin = Geo_reference(zone, min(points_utm[:,0]),
                                           min(points_utm[:,1]))
            points = new_origin.change_points_geo_ref(points_utm,
                                                      points_georeference)
            geo_ref = new_origin

        # At this stage I need a georef and points;
        # the points are relative to the georef.
        geo_ref.write_NetCDF(outfile)

        x = points[:,0]
        y = points[:,1]
        z = outfile.variables['z'][:]

        if verbose:
            print '------------------------------------------------'
            print 'More Statistics:'
            print '  Extent (lat/lon):'
            print '    x in [%f, %f], len(lat) == %d'\
                  %(min(x), max(x), len(x))
            print '    y in [%f, %f], len(lon) == %d'\
                  %(min(y), max(y), len(y))
            print '    z in [%f, %f], len(z) == %d'\
                  %(min(elevation), max(elevation), len(elevation))
            print 'geo_ref: ', geo_ref
            print '------------------------------------------------'

        outfile.variables['x'][:] = points[:,0] #- geo_ref.get_xllcorner()
        outfile.variables['y'][:] = points[:,1] #- geo_ref.get_yllcorner()
        outfile.variables['z'][:] = elevation
        outfile.variables['elevation'][:] = elevation  # FIXME HACK
        outfile.variables['volumes'][:] = volumes.astype(Int32) # On Opteron 64

        q = 'elevation'
        # This updates the _range values
        outfile.variables[q+Write_sww.RANGE][0] = min(elevation)
        outfile.variables[q+Write_sww.RANGE][1] = max(elevation)

    def store_quantities(self, outfile, sww_precision=Float32,
                         slice_index=None, time=None,
                         verbose=False, **quant):
        """
        Write the quantity info.

        **quant is extra keyword arguments passed in.  These must be
        the sww quantities, currently: stage, xmomentum, ymomentum.

        If the time array has already been built, use slice_index
        to specify the index.

        Otherwise, use time to increase the time dimension.

        Maybe make this general, but the viewer assumes these quantities,
        so maybe we don't want it general - unless the viewer is general.

        precon:
        store_triangulation and
        store_header have been called.
        """

        if time is not None:
            file_time = outfile.variables['time']
            slice_index = len(file_time)
            file_time[slice_index] = time

        # Write the conserved quantities from Domain.
        # Typically stage, xmomentum, ymomentum.
        # Other quantities will be ignored, silently.
        # Also write the ranges: stage_range,
        # xmomentum_range and ymomentum_range.
        for q in Write_sww.sww_quantities:
            if not quant.has_key(q):
                msg = 'SWW file can not write quantity %s' %q
                raise NewQuantity, msg
            else:
                q_values = quant[q]
                outfile.variables[q][slice_index] = \
                    q_values.astype(sww_precision)

                # This updates the _range values
                q_range = outfile.variables[q+Write_sww.RANGE][:]
                q_values_min = min(q_values)
                if q_values_min < q_range[0]:
                    outfile.variables[q+Write_sww.RANGE][0] = q_values_min
                q_values_max = max(q_values)
                if q_values_max > q_range[1]:
                    outfile.variables[q+Write_sww.RANGE][1] = q_values_max

    def verbose_quantities(self, outfile):
        print '------------------------------------------------'
        print 'More Statistics:'
        for q in Write_sww.sww_quantities:
            print '  %s in [%f, %f]' %(q,
                                       outfile.variables[q+Write_sww.RANGE][0],
                                       outfile.variables[q+Write_sww.RANGE][1])
        print '------------------------------------------------'
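
# Hedged usage sketch of the Write_sww protocol (store_header, then
# store_triangulation, then store_quantities per time slice).  The mesh
# and quantity values below are invented; real callers obtain them from
# a mux file or a Domain.
def _example_write_sww(outfile):
    # outfile is assumed to be an open, writable NetCDFFile
    points = [[0.0, 0.0], [10.0, 0.0], [0.0, 10.0]]
    volumes = [[0, 1, 2]]
    elevation = array([-1.0, -1.0, -1.0], Float)
    times = [0.0, 30.0]

    sww = Write_sww()
    sww.store_header(outfile, times, len(volumes), len(points),
                     sww_precision=Float)
    sww.store_triangulation(outfile, points, volumes, elevation, zone=56)
    for i in range(len(times)):
        still = zeros(len(points), Float)  # flat water, no momentum
        sww.store_quantities(outfile, slice_index=i,
                             stage=still,
                             xmomentum=still, ymomentum=still,
                             sww_precision=Float)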

def obsolete_write_sww_time_slices(outfile, has, uas, vas, elevation,
                                   mean_stage=0, zscale=1,
                                   verbose=False):
    # Time stepping
    stage = outfile.variables['stage']
    xmomentum = outfile.variables['xmomentum']
    ymomentum = outfile.variables['ymomentum']

    n = len(has)
    j = 0
    for ha, ua, va in map(None, has, uas, vas):
        if verbose and j%((n+10)/10) == 0: print '  Doing %d of %d' %(j, n)
        w = zscale*ha + mean_stage
        stage[j] = w
        h = w - elevation
        xmomentum[j] = ua*h
        ymomentum[j] = -1*va*h # -1 since in mux files south is positive.
        j += 1

def urs2txt(basename_in, location_index=None):
    """
    Not finished or tested
    """

    files_in = [basename_in + WAVEHEIGHT_MUX_LABEL,
                basename_in + EAST_VELOCITY_LABEL,
                basename_in + NORTH_VELOCITY_LABEL]
    quantities = ['HA','UA','VA']

    d = ","

    # Instantiate Urs_points for each of the three mux files.
    mux = {}
    for quantity, file in map(None, quantities, files_in):
        mux[quantity] = Urs_points(file)

    # Could check that the depth is the same. (hashing)

    # Handle to a mux file to do depth stuff
    a_mux = mux[quantities[0]]

    # Convert to utm
    latitudes = a_mux.lonlatdep[:,1]
    longitudes = a_mux.lonlatdep[:,0]
    points_utm, zone = convert_from_latlon_to_utm( \
        latitudes=latitudes, longitudes=longitudes)
    depths = a_mux.lonlatdep[:,2]

    fid = open(basename_in + '.txt', 'w')

    fid.write("zone: " + str(zone) + "\n")

    if location_index is not None:
        # Title
        li = location_index
        fid.write('location_index' +d+ 'lat' +d+ 'long' +d+ 'Easting' +d+ \
                  'Northing' + "\n")
        fid.write(str(li) +d+ str(latitudes[li]) +d+ \
                  str(longitudes[li]) +d+ str(points_utm[li][0]) +d+ \
                  str(points_utm[li][1]) + "\n")

    # The non-time dependent stuff
    # Title
    fid.write('location_index' +d+ 'lat' +d+ 'long' +d+ 'Easting' +d+ \
              'Northing' +d+ 'depth m' + "\n")
    i = 0
    for depth, point_utm, lat, long in map(None, depths,
                                           points_utm, latitudes,
                                           longitudes):

        fid.write(str(i) +d+ str(lat) +d+ str(long) +d+ str(point_utm[0]) +d+ \
                  str(point_utm[1]) +d+ str(depth) + "\n")
        i += 1

    # Time dependent
    if location_index is not None:
        time_step = a_mux.time_step
        i = 0
        # Title
        fid.write('time' +d+ 'HA depth m' +d+ \
                  'UA momentum East x m/sec' +d+ 'VA momentum North y m/sec' \
                  + "\n")
        for HA, UA, VA in map(None, mux['HA'], mux['UA'], mux['VA']):
            fid.write(str(i*time_step) +d+ str(HA[location_index]) +d+ \
                      str(UA[location_index]) +d+ str(VA[location_index]) \
                      + "\n")
            i += 1

class Urs_points:
    """
    Read the info in URS mux files.

    For the quantities, here's a correlation between the file names and
    what they mean:
    z-mux is height above sea level, m
    e-mux is velocity in the Eastern direction, m/s
    n-mux is velocity in the Northern direction, m/s
    """
    def __init__(self, urs_file):
        self.iterated = False
        columns = 3 # long, lat, depth
        mux_file = open(urs_file, 'rb')

        # Number of points/stations
        (self.points_num,) = unpack('i', mux_file.read(4))

        # nt, int - Number of time steps
        (self.time_step_count,) = unpack('i', mux_file.read(4))

        # dt, float - time step, seconds
        (self.time_step,) = unpack('f', mux_file.read(4))

        msg = "Bad data in the urs file."
        if self.points_num < 0:
            mux_file.close()
            raise ANUGAError, msg
        if self.time_step_count < 0:
            mux_file.close()
            raise ANUGAError, msg
        if self.time_step < 0:
            mux_file.close()
            raise ANUGAError, msg

        # The depth is in metres, and it is the distance from the ocean
        # surface to the sea bottom.
        lonlatdep = p_array.array('f')
        lonlatdep.read(mux_file, columns * self.points_num)
        lonlatdep = array(lonlatdep, typecode=Float)
        lonlatdep = reshape(lonlatdep, (self.points_num, columns))
        self.lonlatdep = lonlatdep

        self.mux_file = mux_file
        # check this array

    def __iter__(self):
        """
        Iterate over quantity data, which is with respect to time.

        Note: You can only iterate once over an object.

        Returns quantity information for each time slice.
        """
        msg = "You can only iterate once over a urs file."
        assert not self.iterated, msg
        self.iter_time_step = 0
        self.iterated = True
        return self

    def next(self):
        if self.time_step_count == self.iter_time_step:
            self.close()
            raise StopIteration
        # Read in a time slice from the mux file
        hz_p_array = p_array.array('f')
        hz_p_array.read(self.mux_file, self.points_num)
        hz_p = array(hz_p_array, typecode=Float)
        self.iter_time_step += 1

        return hz_p

    def close(self):
        self.mux_file.close()
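
# Hedged usage sketch ('project_waveheight-z-mux' is an invented name):
# Urs_points is a one-shot iterator; each iteration yields one time
# slice as a Numeric array with one value per point.
def _example_urs_points():
    urs = Urs_points('project_waveheight-z-mux')
    print 'points:', urs.points_num, 'timesteps:', urs.time_step_count
    for i, hz in enumerate(urs):
        print 'slice %d: first value %f' %(i, hz[0])
    # The file is closed automatically when iteration stops.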

#### END URS UNGRIDDED 2 SWW ###


def start_screen_catcher(dir_name=None, myid='', numprocs='', extra_info='',
                         verbose=True):
    """
    Used to store screen output and errors to file; if run on multiple
    processes, each processor will have its own output and error file.

    extra_info - a string that can identify outputs with another
    string, e.g. '_other'

    FIXME: Would be good if you could suppress all the screen output and
    only save it to file... however it seems a bit tricky as this capture
    technique responds to sys.stdout and by then it has already been
    printed out.
    """

    import sys
    if dir_name is None:
        dir_name = getcwd()

    if access(dir_name, W_OK) == 0:
        if verbose: print 'Making directory %s' %dir_name
        mkdir(dir_name, 0777)

    if myid != '':
        myid = '_' + str(myid)
    if numprocs != '':
        numprocs = '_' + str(numprocs)
    if extra_info != '':
        extra_info = '_' + str(extra_info)

    screen_output_name = join(dir_name, "screen_output%s%s%s.txt" %(myid,
                                                                    numprocs,
                                                                    extra_info))
    screen_error_name = join(dir_name, "screen_error%s%s%s.txt" %(myid,
                                                                  numprocs,
                                                                  extra_info))

    if verbose: print 'Starting ScreenCatcher, all output will be stored in %s' \
                      %(screen_output_name)

    # Used to catch screen output to file
    sys.stdout = Screen_Catcher(screen_output_name)
    sys.stderr = Screen_Catcher(screen_error_name)

class Screen_Catcher:
    """This simply catches the screen output and stores it to the file
    defined by start_screen_catcher (above).
    """

    def __init__(self, filename):
        self.filename = filename
        if exists(self.filename):
            print 'Old existing file "%s" has been deleted' %(self.filename)
            remove(self.filename)

    def write(self, stuff):
        fid = open(self.filename, 'a')
        fid.write(stuff)
        fid.close()
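
# Hedged usage sketch ('logs' is an invented directory name): redirect all
# subsequent print statements and errors to per-process files.
#
#     start_screen_catcher('logs', myid=0, numprocs=4)
#     print 'this line goes to logs/screen_output_0_4.txt'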

def copy_code_files(dir_name, filename1, filename2=None):
    """Copies "filename1" and "filename2" to "dir_name".  Very useful for
    information management.
    filename1 and filename2 are both absolute pathnames.
    """

    if access(dir_name, F_OK) == 0:
        print 'Make directory %s' %dir_name
        mkdir(dir_name, 0777)
    shutil.copy(filename1, dir_name + sep + basename(filename1))
    if filename2 is not None:
        shutil.copy(filename2, dir_name + sep + basename(filename2))
        print 'Files %s and %s copied' %(filename1, filename2)
    else:
        print 'File %s copied' %(filename1)

def get_data_from_file(filename, separator_value=','):
    """
    Read in data information from file.

    Returns:
        header_fields, a list of the fields in the first line,
        split on 'separator_value'

        data, an array (M lines x N data columns) of the file
        contents excluding the header

    NOTE: won't deal with columns of different lengths and there must be
    no blank lines at the end.
    """

    fid = open(filename)
    lines = fid.readlines()
    fid.close()

    header_line = lines[0]
    header_fields = header_line.split(separator_value)

    # Array to store the data - I'm sure there is a better way!
    data = array([], typecode=Float)
    data = resize(data, ((len(lines)-1), len(header_fields)))

    array_number = 0
    line_number = 1
    while line_number < len(lines):
        # This gets the lines below the header, explaining the offset
        # between line_number and array_number.
        fields = lines[line_number].split(separator_value)
        for i in range(len(header_fields)):
            # Assign to array
            data[array_number, i] = float(fields[i])

        line_number = line_number + 1
        array_number = array_number + 1

    return header_fields, data
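
# Illustrative sketch (file name and contents invented): reading a small
# csv file with get_data_from_file.  Note the header keeps the trailing
# newline on the last field.
def _example_get_data_from_file():
    fid = open('example_data.csv', 'w')
    fid.write('time,stage\n0.0,1.5\n30.0,1.7\n')
    fid.close()

    header_fields, data = get_data_from_file('example_data.csv')
    # header_fields == ['time', 'stage\n'];  data[1,1] == 1.7
    print header_fields, data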

def store_parameters(verbose=False, **kwargs):
    """
    Store "kwargs" in a temp csv file.  If "completed" is a kwarg, the
    csv file is kwargs[file_name]; else it is
    kwargs[output_dir] + details_temp.csv

    Must have a file_name keyword arg: this is what is written to.
    There might be a better way to do this, using the CSV module's
    Writer and writeDict.

    Writes the file to "output_dir" unless "completed" is in kwargs, in
    which case it writes to the "file_name" kwarg.
    """
    import types

    # Check that kwargs is a dictionary
    if type(kwargs) != types.DictType:
        raise TypeError

    # Is 'completed' in kwargs?
    try:
        kwargs['completed']
        completed = True
    except:
        completed = False

    # Get the file name and assert that a file_name exists
    if completed:
        try:
            file = str(kwargs['file_name'])
        except:
            raise 'kwargs must have file_name'
    else:
        # Write temp file in output directory
        try:
            file = str(kwargs['output_dir']) + 'detail_temp.csv'
        except:
            raise 'kwargs must have output_dir'

    # Extract the header info and the new line info
    line = ''
    header = ''
    count = 0
    keys = kwargs.keys()
    keys.sort()

    # Use the sorted keys to create the header and line data
    for k in keys:
        header = header + str(k)
        line = line + str(kwargs[k])
        count += 1
        if count < len(kwargs):
            header = header + ','
            line = line + ','
    header += '\n'
    line += '\n'

    # Check the header info: if the same, then write; if not, create a
    # new file.
    # Try to open!
    try:
        fid = open(file, "r")
        file_header = fid.readline()
        fid.close()
        if verbose: print 'read file header %s' %file_header
    except:
        msg = 'try to create new file %s' %file
        if verbose: print msg
        # Try to open the file; maybe the directory is bad
        try:
            fid = open(file, "w")
            fid.write(header)
            fid.close()
            file_header = header
        except:
            msg = 'cannot create new file %s' %file
            raise msg

    # If the header is the same or this is a new file
    if file_header == str(header):
        fid = open(file, "a")
        # Write the new line
        fid.write(line)
        fid.close()
    else:
        # Backup plan:
        # if the header is different and has completed, append the info
        # to the end of the details_temp.csv file in the output directory
        file = str(kwargs['output_dir']) + 'detail_temp.csv'
        fid = open(file, "a")
        fid.write(header)
        fid.write(line)
        fid.close()
        if verbose: print 'file', file_header.strip('\n')
        if verbose: print 'head', header.strip('\n')
        if file_header.strip('\n') == str(header): print 'they equal'
        msg = 'WARNING: File header does not match input info; the input ' \
              'variables have changed. Suggest changing the file name.'
        print msg
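
# Hedged usage sketch ('results/' is an invented directory name; note the
# function concatenates output_dir and 'detail_temp.csv' directly, so a
# trailing separator is needed):
#
#     store_parameters(output_dir='results/', run=1, dt=0.5)
#     # appends a sorted-key header plus a value line to
#     # results/detail_temp.csv
#     store_parameters(file_name='results/final.csv', completed=True, run=1)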

#------------------------------------------------
# Functions to obtain diagnostics from sww files
#------------------------------------------------

def get_mesh_and_quantities_from_sww_file(filename, quantity_names,
                                          verbose=False):
    """Get and rebuild the mesh structure and the associated quantities
    from an sww file.
    """

    # FIXME(Ole): This is work in progress.
    # The early return below disables the function; the code after it is
    # unreachable until this is finished.

    import types
    # FIXME (Ole): Maybe refactor filefunction using this more fundamental
    # code.

    return

    # Open NetCDF file
    if verbose: print 'Reading', filename
    fid = NetCDFFile(filename, 'r')

    if type(quantity_names) == types.StringType:
        quantity_names = [quantity_names]

    if quantity_names is None or len(quantity_names) < 1:
        msg = 'No quantities are specified'
        raise Exception, msg

    # Now assert that the requested quantities (and the independent ones)
    # are present in the file
    missing = []
    for quantity in ['x', 'y', 'volumes', 'time'] + quantity_names:
        if not fid.variables.has_key(quantity):
            missing.append(quantity)

    if len(missing) > 0:
        msg = 'Quantities %s could not be found in file %s'\
              %(str(missing), filename)
        fid.close()
        raise Exception, msg

    if not filename.endswith('.sww'):
        msg = 'Filename must have extension .sww'
        raise Exception, msg

    # Get first timestep
    try:
        starttime = fid.starttime[0]
    except ValueError:
        msg = 'Could not read starttime from file %s' %filename
        raise msg

    # Get variables
    time = fid.variables['time'][:]

    # Get origin
    xllcorner = fid.xllcorner[0]
    yllcorner = fid.yllcorner[0]
    zone = fid.zone[0]
    georeference = Geo_reference(zone, xllcorner, yllcorner)

    x = fid.variables['x'][:]
    y = fid.variables['y'][:]
    triangles = fid.variables['volumes'][:]

    x = reshape(x, (len(x),1))
    y = reshape(y, (len(y),1))
    vertex_coordinates = concatenate((x,y), axis=1) # m x 2 array

    # Produce values for desired data points at
    # each timestep for each quantity
    quantities = {}
    for name in quantity_names:
        quantities[name] = fid.variables[name][:]

    fid.close()

    # Create mesh and quad tree
    #interpolator = Interpolate(vertex_coordinates, triangles)

    #return interpolator, quantities, geo_reference, time

def get_flow_through_cross_section(filename,
                                   polyline,
                                   verbose=False):
    """Obtain flow (m^3/s) perpendicular to the cross section given by
    the argument polyline.

    Inputs:
        filename: Name of sww file
        polyline: Representation of the desired cross section - it may
                  contain multiple sections, allowing for complex shapes.

    Output:
        Q: Hydrograph of total flow across the given segments for all
           stored timesteps.

    The normal flow is computed for each triangle intersected by the
    polyline and added up.  If multiple sections are specified, normal
    flows may partially cancel each other.

    FIXME: Not finished - depends on get_mesh_and_quantities_from_sww_file
    above, which is itself work in progress.
    """

    # Get mesh and quantities from sww file
    X = get_mesh_and_quantities_from_sww_file(filename, ['elevation',
                                                         'stage',
                                                         'xmomentum',
                                                         'ymomentum'],
                                              verbose=verbose)
    interpolator, quantities, geo_reference, time = X

    # Find all intersections and associated triangles.
    get_intersecting_segments(polyline)

    # Then store for each triangle the length of the intersecting
    # segment(s), right hand normal(s) and midpoints.
    pass

def get_maximum_inundation_elevation(filename,
                                     polygon=None,
                                     time_interval=None,
                                     verbose=False):
    """Return the highest elevation where depth > 0

    Usage:
        max_runup = get_maximum_inundation_elevation(filename,
                                                     polygon=None,
                                                     time_interval=None,
                                                     verbose=False)

    filename is a NetCDF sww file containing ANUGA model output.
    Optional arguments polygon and time_interval restrict the maximum
    runup calculation to points that lie within the specified polygon
    and time interval.

    If no inundation is found within polygon and time_interval, the
    return value is None, signifying "No Runup" or "Everything is dry".

    See the general function get_maximum_inundation_data for details.
    """

    runup, _ = get_maximum_inundation_data(filename,
                                           polygon=polygon,
                                           time_interval=time_interval,
                                           verbose=verbose)
    return runup

def get_maximum_inundation_location(filename,
                                    polygon=None,
                                    time_interval=None,
                                    verbose=False):
    """Return the location of the highest elevation where h > 0

    Usage:
        max_runup_location = get_maximum_inundation_location(filename,
                                                             polygon=None,
                                                             time_interval=None,
                                                             verbose=False)

    filename is a NetCDF sww file containing ANUGA model output.
    Optional arguments polygon and time_interval restrict the maximum
    runup calculation to points that lie within the specified polygon
    and time interval.

    If no inundation is found within polygon and time_interval, the
    return value is None, signifying "No Runup" or "Everything is dry".

    See the general function get_maximum_inundation_data for details.
    """

    _, max_loc = get_maximum_inundation_data(filename,
                                             polygon=polygon,
                                             time_interval=time_interval,
                                             verbose=verbose)
    return max_loc
5718 | |
---|
5719 | |
---|
5720 | def get_maximum_inundation_data(filename, polygon=None, time_interval=None, |
---|
5721 | use_centroid_values=False, |
---|
5722 | verbose=False): |
---|
5723 | """Compute maximum run up height from sww file. |
---|
5724 | |
---|
5725 | |
---|
5726 | Usage: |
---|
5727 | runup, location = get_maximum_inundation_data(filename, |
---|
5728 | polygon=None, |
---|
5729 | time_interval=None, |
---|
5730 | verbose=False) |
---|
5731 | |
---|
5732 | |
---|
5733 | Algorithm is as in get_maximum_inundation_elevation from |
---|
5734 | shallow_water_domain |
---|
5735 | except that this function works with the sww file and computes the maximal |
---|
5736 | runup height over multiple timesteps. |
---|
5737 | |
---|
5738 | Optional arguments polygon and time_interval restricts the |
---|
5739 | maximum runup calculation |
---|
5740 | to a points that lie within the specified polygon and time interval. |
---|
5741 | Polygon is |
---|
5742 | assumed to be in (absolute) UTM coordinates in the same zone as domain. |
---|
5743 | |
---|
5744 | If no inundation is found within polygon and time_interval the return value |
---|
5745 | is None signifying "No Runup" or "Everything is dry". |
---|
5746 | """ |
---|

    # We are using nodal values here as that is what is stored in sww files.

    # Water depth below which it is considered to be 0 in the model
    # FIXME (Ole): Allow this to be specified as a keyword argument as well

    from anuga.utilities.polygon import inside_polygon
    from anuga.config import minimum_allowed_height
    from Scientific.IO.NetCDF import NetCDFFile

    dirname, base = os.path.split(filename)

    iterate_over = get_all_swwfiles(dirname, base)

    if verbose:
        print 'Reading from %s' %filename
        # FIXME: Use general swwstats (when done)

    maximal_runup = None
    maximal_runup_location = None

    for swwfile in iterate_over:
        # Read sww file
        filename = join(dirname, swwfile + '.sww')

        if verbose:
            print 'Reading from %s' %filename
            # FIXME: Use general swwstats (when done)

        fid = NetCDFFile(filename)

        # Get geo_reference
        # sww files don't have to have a geo_ref
        try:
            geo_reference = Geo_reference(NetCDFObject=fid)
        except AttributeError:
            geo_reference = Geo_reference() # Default georef object

        xllcorner = geo_reference.get_xllcorner()
        yllcorner = geo_reference.get_yllcorner()
        zone = geo_reference.get_zone()

        # Get extent
        volumes = fid.variables['volumes'][:]
        x = fid.variables['x'][:] + xllcorner
        y = fid.variables['y'][:] + yllcorner

        # Get the relevant quantities (convert from single precision)
        elevation = array(fid.variables['elevation'][:], Float)
        stage = array(fid.variables['stage'][:], Float)

        # Here's where one could convert nodal information to centroid
        # information, but that is probably something we need to write in C.
        # The Python approach below is NOT finished!!!
        if use_centroid_values is True:
            x = get_centroid_values(x, volumes)
            y = get_centroid_values(y, volumes)
            elevation = get_centroid_values(elevation, volumes)

        # Spatial restriction
        if polygon is not None:
            msg = 'polygon must be a sequence of points.'
            assert len(polygon[0]) == 2, msg
            # FIXME (Ole): Make a generic polygon input check in polygon.py
            # and call it here

            points = concatenate((x[:,NewAxis], y[:,NewAxis]), axis=1)

            point_indices = inside_polygon(points, polygon)
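            # point_indices now holds the indices of the nodes that fall
            # inside the polygon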

            # Restrict quantities to polygon
            elevation = take(elevation, point_indices)
            stage = take(stage, point_indices, axis=1)

            # Get info for location of maximal runup
            points_in_polygon = take(points, point_indices)
            x = points_in_polygon[:,0]
            y = points_in_polygon[:,1]
        else:
            # Take all points
            point_indices = arange(len(x))

        # Temporal restriction
        time = fid.variables['time'][:]
        all_timeindices = arange(len(time))
        if time_interval is not None:
            msg = 'time_interval must be a sequence of length 2.'
            assert len(time_interval) == 2, msg
            msg = 'time_interval %s must not be decreasing.' %(time_interval)
            assert time_interval[1] >= time_interval[0], msg

            msg = 'Specified time interval [%.8f:%.8f]' %tuple(time_interval)
            msg += ' does not overlap model time interval: [%.8f, %.8f]\n'\
                   %(time[0], time[-1])
            if time_interval[1] < time[0]: raise ValueError(msg)
            if time_interval[0] > time[-1]: raise ValueError(msg)

            # Take time indices corresponding to interval (& is bitwise AND)
            timesteps = compress((time_interval[0] <= time) &
                                 (time <= time_interval[1]),
                                 all_timeindices)
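            # Illustrative example with hypothetical values: if
            # time = [0., 10., 20., 30.] and time_interval = [5., 25.],
            # the mask selects timestep indices [1, 2].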

            msg = 'time_interval %s did not include any model timesteps.'\
                  %(time_interval)
            assert len(timesteps) > 0, msg
        else:
            # Take them all
            timesteps = all_timeindices

        fid.close()

        # Compute maximal runup for each timestep
        for i in timesteps:
            if use_centroid_values is True:
                stage_i = get_centroid_values(stage[i,:], volumes)
            else:
                stage_i = stage[i,:]

            depth = stage_i - elevation
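            # depth is the water column height above the bed at each node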

            # Get wet nodes, i.e. nodes with depth > 0 within the given
            # region and timesteps
            wet_nodes = compress(depth > minimum_allowed_height,
                                 arange(len(depth)))

            if len(wet_nodes) == 0:
                runup = None
            else:
                # Find maximum elevation among wet nodes
                wet_elevation = take(elevation, wet_nodes)

                runup_index = argmax(wet_elevation)
                runup = max(wet_elevation)
                assert wet_elevation[runup_index] == runup # Must be True

            if runup > maximal_runup:
                maximal_runup = runup # This works even if maximal_runup is None

                # Record location
                wet_x = take(x, wet_nodes)
                wet_y = take(y, wet_nodes)
                maximal_runup_location = [wet_x[runup_index],
                                          wet_y[runup_index]]

    return maximal_runup, maximal_runup_location
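
# NOTE (assumption): get_centroid_values, called above when
# use_centroid_values is True, is not defined in this section; it is assumed
# to average the three nodal values of each triangle. If it is not defined
# elsewhere, a minimal Numeric sketch could look like the following
# (an illustrative sketch only, not the authoritative implementation):
#
#     def get_centroid_values(q, volumes):
#         """Average nodal quantity q over the three vertices of each
#         triangle given by volumes (an Nx3 array of node indices).
#         Returns one value per triangle (centroid).
#         """
#         return (take(q, volumes[:,0]) +
#                 take(q, volumes[:,1]) +
#                 take(q, volumes[:,2])) / 3.0
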
def get_all_swwfiles(look_in_dir='', base_name='', verbose=False):
    '''
    Finds all the sww files in "look_in_dir" whose names contain "base_name".
    Will accept base_name with or without the extension ".sww".

    Returns: a list of strings

    Usage: iterate_over = get_all_swwfiles(dir, name)
    then
        for swwfile in iterate_over:
            do stuff

    Check "export_grids" and "get_maximum_inundation_data" for examples
    '''

    # Check the extension if one was supplied
    name, extension = os.path.splitext(base_name)

    if extension != '' and extension != '.sww':
        msg = 'File %s (extension %s) must be a NetCDF sww file!'\
              %(base_name, extension)
        raise IOError, msg

    if look_in_dir == "":
        look_in_dir = "." # Unix compatibility

    dir_ls = os.listdir(look_in_dir)
    iterate_over = [x[:-4] for x in dir_ls if name in x and x[-4:] == '.sww']
    if len(iterate_over) == 0:
        msg = 'No files with base name %s' %(name)
        raise IOError, msg
    if verbose: print 'iterate over %s' %(iterate_over)

    return iterate_over


def get_all_files_with_extension(look_in_dir='', base_name='',
                                 extension='.sww', verbose=False):
    '''
    Finds all files in "look_in_dir" whose names contain "base_name" and
    which have the given extension.

    Returns: a list of strings

    Usage: iterate_over = get_all_files_with_extension(dir, name, ext)
    then
        for filename in iterate_over:
            do stuff

    Check "export_grids" and "get_maximum_inundation_data" for examples
    '''

    # Check the extension if one was supplied
    name, ext = os.path.splitext(base_name)

    if ext != '' and ext != extension:
        msg = 'base_name %s must be a file with %s extension!'\
              %(base_name, extension)
        raise IOError, msg

    if look_in_dir == "":
        look_in_dir = "." # Unix compatibility

    dir_ls = os.listdir(look_in_dir)
    # Match on the full extension so that extensions of any length work
    iterate_over = [os.path.splitext(x)[0] for x in dir_ls
                    if name in x and os.path.splitext(x)[1] == extension]
    if len(iterate_over) == 0:
        msg = 'No files with base name %s in %s' %(name, look_in_dir)
        raise IOError, msg
    if verbose: print 'iterate over %s' %(iterate_over)

    return iterate_over


def get_all_directories_with_name(look_in_dir='', base_name='', verbose=False):
    '''
    Finds all the directory entries in "look_in_dir" whose names contain
    "base_name".

    Returns: a list of strings

    Usage: iterate_over = get_all_directories_with_name(dir, name)
    then
        for dirname in iterate_over:
            do stuff

    Check "export_grids" and "get_maximum_inundation_data" for examples
    '''

    if look_in_dir == "":
        look_in_dir = "." # Unix compatibility

    dir_ls = os.listdir(look_in_dir)
    iterate_over = [x for x in dir_ls if base_name in x]
    if len(iterate_over) == 0:
        msg = 'No files with base name %s' %(base_name)
        raise IOError, msg
    if verbose: print 'iterate over %s' %(iterate_over)

    return iterate_over

def points2polygon(points_file,
                   minimum_triangle_angle=3.0):
    """
    WARNING: This function is not fully working.

    Return a polygon derived from the alpha shape of a points file.

    WARNING: Alpha shape returns multiple polygons, but this function only
    returns one polygon.
    """
    from anuga.pmesh.mesh import Mesh, importMeshFromFile
    from anuga.shallow_water import Domain
    from anuga.pmesh.mesh_interface import create_mesh_from_regions

    mesh = importMeshFromFile(points_file)
    mesh.auto_segment()
    mesh.exportASCIIsegmentoutlinefile("outline.tsh")
    mesh2 = importMeshFromFile("outline.tsh")
    mesh2.generate_mesh(maximum_triangle_area=1000000000,
                        minimum_triangle_angle=minimum_triangle_angle,
                        verbose=False)
    mesh2.export_mesh_file('outline_meshed.tsh')
    domain = Domain("outline_meshed.tsh", use_cache=False)
    polygon = domain.get_boundary_polygon()
    return polygon
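
# Example (illustrative; 'points.csv' is a hypothetical points file, not one
# provided by this module):
#
#     polygon = points2polygon('points.csv', minimum_triangle_angle=3.0)
#     # polygon is a list of boundary vertices of the alpha-shape outline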

#-------------------------------------------------------------
if __name__ == "__main__":
    # Set umask from config to force the same permissions for all files and
    # directories created. (It was noticed that "mpirun" doesn't honour the
    # umask set in your .bashrc etc.)
    from config import umask
    import os
    os.umask(umask)