1 | """datamanager.py - input output for AnuGA |
---|
2 | |
---|
3 | |
---|
4 | This module takes care of reading and writing datafiles such as topograhies, |
---|
5 | model output, etc |
---|
6 | |
---|
7 | |
---|
8 | Formats used within AnuGA: |
---|
9 | |
---|
10 | .sww: Netcdf format for storing model output f(t,x,y) |
---|
11 | .tms: Netcdf format for storing time series f(t) |
---|
12 | |
---|
13 | .csv: ASCII format for storing arbitrary points and associated attributes |
---|
14 | .pts: NetCDF format for storing arbitrary points and associated attributes |
---|
15 | |
---|
16 | .asc: ASCII format of regular DEMs as output from ArcView |
---|
17 | .prj: Associated ArcView file giving more meta data for asc format |
---|
18 | .ers: ERMapper header format of regular DEMs for ArcView |
---|
19 | |
---|
20 | .dem: NetCDF representation of regular DEM data |
---|
21 | |
---|
22 | .tsh: ASCII format for storing meshes and associated boundary and region info |
---|
23 | .msh: NetCDF format for storing meshes and associated boundary and region info |
---|
24 | |
---|
25 | .nc: Native ferret NetCDF format |
---|
26 | .geo: Houdinis ascii geometry format (?) |
---|
27 | |
---|
28 | |
---|
29 | A typical dataflow can be described as follows |
---|
30 | |
---|
31 | Manually created files: |
---|
32 | ASC, PRJ: Digital elevation models (gridded) |
---|
33 | TSH: Triangular meshes (e.g. created from anuga.pmesh) |
---|
34 | NC Model outputs for use as boundary conditions (e.g from MOST) |
---|
35 | |
---|
36 | |
---|
37 | AUTOMATICALLY CREATED FILES: |
---|
38 | |
---|
39 | ASC, PRJ -> DEM -> PTS: Conversion of DEM's to native pts file |
---|
40 | |
---|
41 | NC -> SWW: Conversion of MOST bundary files to boundary sww |
---|
42 | |
---|
43 | PTS + TSH -> TSH with elevation: Least squares fit |
---|
44 | |
---|
45 | TSH -> SWW: Conversion of TSH to sww viewable using Swollen |
---|
46 | |
---|
47 | TSH + Boundary SWW -> SWW: Simluation using abstract_2d_finite_volumes |
---|
48 | |
---|
49 | """ |
---|

# This file was reverted from changeset:5484 to changeset:5470 on 10th July
# by Ole.

import os, sys
import csv
import string
import shutil
from struct import unpack
import array as p_array
from os import sep, path, remove, mkdir, access, F_OK, W_OK, getcwd

import numpy as num

from Scientific.IO.NetCDF import NetCDFFile
from os.path import exists, basename, join

from anuga.coordinate_transforms.redfearn import redfearn, \
     convert_from_latlon_to_utm
from anuga.coordinate_transforms.geo_reference import Geo_reference, \
     write_NetCDF_georeference, ensure_geo_reference
from anuga.geospatial_data.geospatial_data import Geospatial_data, \
     ensure_absolute
from anuga.config import minimum_storable_height as \
     default_minimum_storable_height
from anuga.config import netcdf_mode_r, netcdf_mode_w, netcdf_mode_a
from anuga.config import netcdf_float, netcdf_float32, netcdf_int
from anuga.config import max_float
from anuga.utilities.numerical_tools import ensure_numeric, mean
from anuga.caching.caching import myhash
from anuga.shallow_water.shallow_water_domain import Domain
from anuga.abstract_2d_finite_volumes.pmesh2domain import \
     pmesh_to_domain_instance
from anuga.abstract_2d_finite_volumes.util import get_revision_number, \
     remove_lone_verts, sww2timeseries, get_centroid_values

from anuga.abstract_2d_finite_volumes.neighbour_mesh import segment_midpoints
from anuga.load_mesh.loadASCII import export_mesh_file
from anuga.geometry.polygon import intersection, inside_polygon
from anuga.file_conversion.sww2dem import sww2dem

from anuga.utilities.system_tools import get_vars_in_expression
import anuga.utilities.log as log

from anuga.utilities.file_utils import create_filename, \
     get_all_swwfiles
from anuga.file.csv_file import load_csv_as_dict
from sww_file import Read_sww, Write_sww

from anuga.anuga_exceptions import DataMissingValuesError, \
     DataFileNotOpenError, DataTimeError, DataDomainError, \
     NewQuantity


def csv2building_polygons(file_name,
                          floor_height=3,
                          clipping_polygons=None):
    """
    Convert CSV files of the form:

    easting,northing,id,floors
    422664.22,870785.46,2,0
    422672.48,870780.14,2,0
    422668.17,870772.62,2,0
    422660.35,870777.17,2,0
    422664.22,870785.46,2,0
    422661.30,871215.06,3,1
    422667.50,871215.70,3,1
    422668.30,871204.86,3,1
    422662.21,871204.33,3,1
    422661.30,871215.06,3,1

    to a dictionary of polygons with id as key.
    The associated number of floors is converted to metres above MSL and
    returned as a separate dictionary, also keyed by id.

    Optional parameter floor_height is the height of each building storey.
    Optional parameter clipping_polygons is a list of polygons selecting
    buildings. Any building not in these polygons will be omitted.

    See csv2polygons for more details.
    """

    polygons, values = csv2polygons(file_name,
                                    value_name='floors',
                                    clipping_polygons=clipping_polygons)

    heights = {}
    for key in values.keys():
        v = float(values[key])
        heights[key] = v * floor_height

    return polygons, heights


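# The following is a minimal usage sketch of csv2building_polygons (not
# called anywhere). The input file name 'buildings.csv' is hypothetical;
# it is assumed to have columns easting,northing,id,floors as documented
# above.
def _example_csv2building_polygons():
    polygons, heights = csv2building_polygons('buildings.csv',
                                              floor_height=3)
    for id in polygons:
        print 'building %s: %d vertices, height %.1f m' \
              % (id, len(polygons[id]), heights[id])

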
##
# @brief Convert CSV file into a dictionary of polygons and associated values.
# @param file_name The path to the file to read.
# @param value_name Name of the 4th (value) column.
# @param clipping_polygons Optional list of polygons used to select polygons.
def csv2polygons(file_name,
                 value_name='value',
                 clipping_polygons=None):
    """
    Convert CSV files of the form:

    easting,northing,id,value
    422664.22,870785.46,2,0
    422672.48,870780.14,2,0
    422668.17,870772.62,2,0
    422660.35,870777.17,2,0
    422664.22,870785.46,2,0
    422661.30,871215.06,3,1
    422667.50,871215.70,3,1
    422668.30,871204.86,3,1
    422662.21,871204.33,3,1
    422661.30,871215.06,3,1

    to a dictionary of polygons with id as key.
    The associated values are returned as a separate dictionary also keyed
    by id.

    easting: x coordinate relative to zone implied by the model
    northing: y coordinate relative to zone implied by the model
    id: tag for polygon comprising points with this tag
    value: numeral associated with each polygon. This must be the same for
           all points in each polygon.

    The last header, value, can take on other names such as roughness,
    floors, etc - or it can be omitted, in which case the returned values
    will be None.

    Eastings and northings will be returned as floating point values while
    id and values will be returned as strings.

    Optional argument clipping_polygons will select only those polygons
    that are fully within one or more of the clipping polygons. In other
    words, any polygon from the csv file which has at least one point not
    inside one of the clipping polygons will be excluded.

    See underlying function load_csv_as_dict for more details.
    """

    X, _ = load_csv_as_dict(file_name)

    msg = 'Polygon csv file must have 3 or 4 columns'
    assert len(X.keys()) in [3, 4], msg

    msg = 'Did not find expected column header: easting'
    assert 'easting' in X.keys(), msg

    msg = 'Did not find expected column header: northing'
    assert 'northing' in X.keys(), msg

    msg = 'Did not find expected column header: id'
    assert 'id' in X.keys(), msg

    if value_name is not None:
        msg = 'Did not find expected column header: %s' % value_name
        assert value_name in X.keys(), msg

    polygons = {}
    if len(X.keys()) == 4:
        values = {}
    else:
        values = None

    # Loop through entries and compose polygons
    excluded_polygons = {}
    past_ids = {}
    last_id = None
    for i, id in enumerate(X['id']):

        # Check for duplicate polygons
        if id in past_ids:
            msg = 'Polygon %s was duplicated in line %d' % (id, i)
            raise Exception(msg)

        if id not in polygons:
            # Start new polygon
            polygons[id] = []
            if values is not None:
                values[id] = X[value_name][i]

            # Keep track of previous polygon ids
            if last_id is not None:
                past_ids[last_id] = i

        # Append this point to current polygon
        point = [float(X['easting'][i]), float(X['northing'][i])]

        if clipping_polygons is not None:
            exclude = True
            for clipping_polygon in clipping_polygons:
                if inside_polygon(point, clipping_polygon):
                    exclude = False
                    break

            if exclude is True:
                excluded_polygons[id] = True

        polygons[id].append(point)

        # Check that value is the same across each polygon
        if values is not None:
            msg = 'Values must be the same across each polygon. '
            msg += 'I got %s in line %d but it should have been %s' \
                   % (X[value_name][i], i, values[id])
            assert values[id] == X[value_name][i], msg

        last_id = id

    # Weed out polygons that were not wholly inside clipping polygons
    for id in excluded_polygons:
        del polygons[id]

    return polygons, values


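# Minimal usage sketch of csv2polygons (not called anywhere). The file name
# 'regions.csv' and the clipping rectangle are hypothetical; any polygon
# with a vertex outside the clipping rectangle is dropped from the result.
def _example_csv2polygons():
    clip = [[422000.0, 870000.0], [423000.0, 870000.0],
            [423000.0, 872000.0], [422000.0, 872000.0]]
    polygons, values = csv2polygons('regions.csv',
                                    value_name='roughness',
                                    clipping_polygons=[clip])
    print '%d polygons survived clipping' % len(polygons)

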
##
# @brief Filter data file, selecting timesteps first:step:last.
# @param filename1 Data file to filter.
# @param filename2 File to write filtered timesteps to.
# @param first First timestep.
# @param last Last timestep.
# @param step Timestep stride.
def filter_netcdf(filename1, filename2, first=0, last=None, step=1):
    """Filter data file, selecting timesteps first:step:last.

    Read netcdf filename1, pick timesteps first:step:last and save to
    netcdf file filename2
    """

    from Scientific.IO.NetCDF import NetCDFFile

    # Get NetCDF
    infile = NetCDFFile(filename1, netcdf_mode_r)   # Open existing file for read
    outfile = NetCDFFile(filename2, netcdf_mode_w)  # Open new file

    # Copy dimensions
    for d in infile.dimensions:
        outfile.createDimension(d, infile.dimensions[d])

    # Copy variable definitions
    for name in infile.variables:
        var = infile.variables[name]
        outfile.createVariable(name, var.dtype.char, var.dimensions)

    # Copy the static variables
    for name in infile.variables:
        if name == 'time' or name == 'stage':
            pass
        else:
            outfile.variables[name][:] = infile.variables[name][:]

    # Copy selected timesteps
    time = infile.variables['time']
    stage = infile.variables['stage']

    newtime = outfile.variables['time']
    newstage = outfile.variables['stage']

    if last is None:
        last = len(time)

    selection = range(first, last, step)
    for i, j in enumerate(selection):
        log.critical('Copying timestep %d of %d (%f)'
                     % (j, last-first, time[j]))
        newtime[i] = time[j]
        newstage[i,:] = stage[j,:]

    # Close
    infile.close()
    outfile.close()


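# Minimal usage sketch of filter_netcdf (not called anywhere). The file
# names are hypothetical; the input is assumed to contain the 'time' and
# 'stage' variables the function requires. This would keep every second
# timestep of the first 100 in 'full.sww' and write them to 'thinned.sww'.
def _example_filter_netcdf():
    filter_netcdf('full.sww', 'thinned.sww', first=0, last=100, step=2)

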
##
# @brief Export grid files for quantities in one or more sww files.
# @param basename_in Stem of the input sww filename(s).
# @param extra_name_out Extra name to append to each output filename stem.
# @param quantities List of quantities to export (defaults to ['elevation']).
# @param reduction Reduction to apply over time (see sww2dem).
# @param cellsize Grid cell size.
# @param number_of_decimal_places Number of decimal places in output values.
# @param NODATA_value Value to write where there is no data.
# @param easting_min Western limit of the output grid.
# @param easting_max Eastern limit of the output grid.
# @param northing_min Southern limit of the output grid.
# @param northing_max Northern limit of the output grid.
# @param verbose True if this function is to be verbose.
# @param origin Origin of the output grid.
# @param datum Datum of the output data.
# @param format Output format, e.g. 'asc' or 'ers'.
# @return Basenames of the files produced.
def export_grid(basename_in, extra_name_out=None,
                quantities=None,  # defaults to elevation
                reduction=None,
                cellsize=10,
                number_of_decimal_places=None,
                NODATA_value=-9999,
                easting_min=None,
                easting_max=None,
                northing_min=None,
                northing_max=None,
                verbose=False,
                origin=None,
                datum='WGS84',
                format='ers'):
    """Wrapper for sww2dem.
    See sww2dem to find out what most of the parameters do.

    quantities is a list of quantities. Each quantity will be
    calculated for each sww file.

    This function returns the basenames of the files produced, i.e. the
    directory plus the file name without the extension. It produces one
    output file per quantity for each input sww file.
    """

    if quantities is None:
        quantities = ['elevation']

    if type(quantities) is str:
        quantities = [quantities]

    # How many sww files are there?
    dir, base = os.path.split(basename_in)

    iterate_over = get_all_swwfiles(dir, base, verbose)

    if dir == "":
        dir = "."  # Unix compatibility

    files_out = []
    for sww_file in iterate_over:
        for quantity in quantities:
            if extra_name_out is None:
                basename_out = sww_file + '_' + quantity
            else:
                basename_out = sww_file + '_' + quantity + '_' + extra_name_out

            file_out = sww2dem(dir+sep+sww_file, dir+sep+basename_out,
                               quantity,
                               reduction,
                               cellsize,
                               number_of_decimal_places,
                               NODATA_value,
                               easting_min,
                               easting_max,
                               northing_min,
                               northing_max,
                               verbose,
                               origin,
                               datum,
                               format)
            files_out.append(file_out)

    return files_out


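# Minimal usage sketch of export_grid (not called anywhere). The stem
# 'simulation' is hypothetical, and it is assumed (as documented for
# sww2dem) that quantity expressions such as 'stage-elevation' and the
# reduction operator max are accepted. This would write one ERMapper
# grid of maximum water depth per matching sww file.
def _example_export_grid():
    files = export_grid('simulation',
                        quantities=['stage-elevation'],
                        reduction=max,
                        cellsize=25,
                        verbose=True)
    print 'Wrote: %s' % files

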
##
# @brief Read DEM file, decimate data, write new DEM file.
# @param basename_in Stem of the input filename.
# @param stencil 2D array of weights used to compute each decimated value.
# @param cellsize_new New cell size to resample on.
# @param basename_out Stem of the output filename.
# @param verbose True if this function is to be verbose.
def decimate_dem(basename_in, stencil, cellsize_new, basename_out=None,
                 verbose=False):
    """Read Digital Elevation model from the following NetCDF format (.dem)

    Example:

    ncols         3121
    nrows         1800
    xllcorner     722000
    yllcorner     5893000
    cellsize      25
    NODATA_value  -9999
    138.3698 137.4194 136.5062 135.5558 ..........

    Decimate data to cellsize_new using stencil and write to NetCDF dem format.
    """

    import os
    from Scientific.IO.NetCDF import NetCDFFile

    root = basename_in
    inname = root + '.dem'

    # Open existing netcdf file to read
    infile = NetCDFFile(inname, netcdf_mode_r)

    if verbose: log.critical('Reading DEM from %s' % inname)

    # Read metadata (convert from numpy.int32 to int where appropriate)
    ncols = int(infile.ncols[0])
    nrows = int(infile.nrows[0])
    xllcorner = infile.xllcorner[0]
    yllcorner = infile.yllcorner[0]
    cellsize = int(infile.cellsize[0])
    NODATA_value = int(infile.NODATA_value[0])
    zone = int(infile.zone[0])
    false_easting = infile.false_easting[0]
    false_northing = infile.false_northing[0]
    projection = infile.projection
    datum = infile.datum
    units = infile.units

    dem_elevation = infile.variables['elevation']

    # Get output file name
    if basename_out is None:
        outname = root + '_' + repr(cellsize_new) + '.dem'
    else:
        outname = basename_out + '.dem'

    if verbose: log.critical('Write decimated NetCDF file to %s' % outname)

    # Determine some dimensions for decimated grid
    (nrows_stencil, ncols_stencil) = stencil.shape
    x_offset = ncols_stencil / 2
    y_offset = nrows_stencil / 2
    cellsize_ratio = int(cellsize_new / cellsize)
    ncols_new = 1 + (ncols - ncols_stencil) / cellsize_ratio
    nrows_new = 1 + (nrows - nrows_stencil) / cellsize_ratio

    # Open netcdf file for output
    outfile = NetCDFFile(outname, netcdf_mode_w)

    # Create new file
    outfile.institution = 'Geoscience Australia'
    outfile.description = 'NetCDF DEM format for compact and portable ' \
                          'storage of spatial point data'

    # Georeferencing
    outfile.zone = zone
    outfile.projection = projection
    outfile.datum = datum
    outfile.units = units

    outfile.cellsize = cellsize_new
    outfile.NODATA_value = NODATA_value
    outfile.false_easting = false_easting
    outfile.false_northing = false_northing

    outfile.xllcorner = xllcorner + (x_offset * cellsize)
    outfile.yllcorner = yllcorner + (y_offset * cellsize)
    outfile.ncols = ncols_new
    outfile.nrows = nrows_new

    # Dimension definition
    outfile.createDimension('number_of_points', nrows_new*ncols_new)

    # Variable definition
    outfile.createVariable('elevation', netcdf_float, ('number_of_points',))

    # Get handle to the variable
    elevation = outfile.variables['elevation']

    dem_elevation_r = num.reshape(dem_elevation, (nrows, ncols))

    # Store data
    global_index = 0
    for i in range(nrows_new):
        if verbose: log.critical('Processing row %d of %d' % (i, nrows_new))

        lower_index = global_index
        telev = num.zeros(ncols_new, num.float)
        local_index = 0
        trow = i * cellsize_ratio

        for j in range(ncols_new):
            tcol = j * cellsize_ratio
            tmp = dem_elevation_r[trow:trow+nrows_stencil,
                                  tcol:tcol+ncols_stencil]

            # If dem contains 1 or more NODATA_values set value in
            # decimated dem to NODATA_value, else compute decimated
            # value using stencil
            if num.sum(num.sum(num.equal(tmp, NODATA_value))) > 0:
                telev[local_index] = NODATA_value
            else:
                telev[local_index] = num.sum(num.sum(tmp * stencil))

            global_index += 1
            local_index += 1

        upper_index = global_index

        elevation[lower_index:upper_index] = telev

    assert global_index == nrows_new*ncols_new, \
           'index not equal to number of points'

    infile.close()
    outfile.close()


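# Minimal usage sketch of decimate_dem (not called anywhere). The stem
# 'topography' is hypothetical. To go from a 25 m DEM to a 100 m DEM
# (ratio 4), a uniform 4x4 averaging stencil has all weights 1/16 so
# that each decimated cell is the mean of the 4x4 block it covers.
def _example_decimate_dem():
    stencil = num.ones((4, 4), num.float) / 16.0
    decimate_dem('topography', stencil, cellsize_new=100,
                 basename_out='topography_100m', verbose=True)

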
#### URS 2 SWW ###

# Definitions of various NetCDF dimension names, etc.
lon_name = 'LON'
lat_name = 'LAT'
time_name = 'TIME'
precision = netcdf_float  # So if we want to change the precision it's done here

##
# @brief Class for a NetCDF data file writer.
class Write_nc:
    """Write an nc file.

    Note, this should be checked to meet cdc netcdf conventions for gridded
    data. http://www.cdc.noaa.gov/cdc/conventions/cdc_netcdf_standard.shtml
    """

    ##
    # @brief Instantiate a Write_nc instance.
    # @param quantity_name Quantity to write: 'HA', 'UA' or 'VA'.
    # @param file_name Path to the output file.
    # @param time_step_count The number of time steps.
    # @param time_step The time_step size.
    # @param lon An iterable of the longitudes.
    # @param lat An iterable of the latitudes.
    def __init__(self,
                 quantity_name,
                 file_name,
                 time_step_count,
                 time_step,
                 lon,
                 lat):
        """Instantiate a Write_nc instance (NetCDF file writer).

        time_step_count is the number of time steps.
        time_step is the time step size

        pre-condition: quantity_name must be 'HA', 'UA' or 'VA'.
        """

        self.quantity_name = quantity_name
        quantity_units = {'HA': 'CENTIMETERS',
                          'UA': 'CENTIMETERS/SECOND',
                          'VA': 'CENTIMETERS/SECOND'}

        multiplier_dic = {'HA': 100.0,   # To convert from m to cm
                          'UA': 100.0,   # and m/s to cm/sec
                          'VA': -100.0}  # MUX files have positive x in the
                                         # Southern direction. This corrects
                                         # for it, when writing nc files.

        self.quantity_multiplier = multiplier_dic[self.quantity_name]

        #self.file_name = file_name
        self.time_step_count = time_step_count
        self.time_step = time_step

        # NetCDF file definition
        self.outfile = NetCDFFile(file_name, netcdf_mode_w)
        outfile = self.outfile

        # Create new file
        nc_lon_lat_header(outfile, lon, lat)

        # TIME
        outfile.createDimension(time_name, None)
        outfile.createVariable(time_name, precision, (time_name,))

        # QUANTITY
        outfile.createVariable(self.quantity_name, precision,
                               (time_name, lat_name, lon_name))
        outfile.variables[self.quantity_name].missing_value = -1.e+034
        outfile.variables[self.quantity_name].units = \
            quantity_units[self.quantity_name]
        outfile.variables[lon_name][:] = ensure_numeric(lon)
        outfile.variables[lat_name][:] = ensure_numeric(lat)

        # Assume no one will be wanting to read this, while we are writing
        #outfile.close()

    ##
    # @brief Write a time-step of quantity data.
    # @param quantity_slice The data to be stored for this time-step.
    def store_timestep(self, quantity_slice):
        """Write a time slice of quantity info

        quantity_slice is the data to be stored at this time step
        """

        # Get the variables
        time = self.outfile.variables[time_name]
        quantity = self.outfile.variables[self.quantity_name]

        # Get index of slice to write
        i = len(time)

        # Store time
        time[i] = i * self.time_step    #self.domain.time
        quantity[i,:] = quantity_slice * self.quantity_multiplier

    ##
    # @brief Close the file underlying the class instance.
    def close(self):
        self.outfile.close()


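# Minimal usage sketch of Write_nc (not called anywhere). The file name
# 'amplitude.nc' and the 2x2 grid are hypothetical; wave amplitudes are
# supplied in metres and stored in centimetres by the class.
def _example_write_nc():
    lon = [150.0, 150.5]
    lat = [-35.5, -35.0]
    writer = Write_nc('HA', 'amplitude.nc',
                      time_step_count=2, time_step=60.0,
                      lon=lon, lat=lat)
    for dummy in range(2):
        writer.store_timestep(num.zeros((len(lat), len(lon)), num.float))
    writer.close()

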
##
# @brief Write an NC elevation file.
# @param file_out Path to the output file.
# @param lon An iterable of the longitudes.
# @param lat An iterable of the latitudes.
# @param depth_vector The elevation data to write.
def write_elevation_nc(file_out, lon, lat, depth_vector):
    """Write an nc elevation file."""

    # NetCDF file definition
    outfile = NetCDFFile(file_out, netcdf_mode_w)

    # Create new file
    nc_lon_lat_header(outfile, lon, lat)

    # ELEVATION
    zname = 'ELEVATION'
    outfile.createVariable(zname, precision, (lat_name, lon_name))
    outfile.variables[zname].units = 'CENTIMETERS'
    outfile.variables[zname].missing_value = -1.e+034

    outfile.variables[lon_name][:] = ensure_numeric(lon)
    outfile.variables[lat_name][:] = ensure_numeric(lat)

    depth = num.reshape(depth_vector, (len(lat), len(lon)))
    outfile.variables[zname][:] = depth

    outfile.close()


##
# @brief Write lat/lon headers to a NetCDF file.
# @param outfile Handle to open file to write to.
# @param lon An iterable of the longitudes.
# @param lat An iterable of the latitudes.
# @note Defines lat/long dimensions and variables, sets various attributes
#       (.point_spacing and .units) and writes lat/lon data.
def nc_lon_lat_header(outfile, lon, lat):
    """Write lat/lon headers to a NetCDF file.

    outfile is the netcdf file handle.
    lon - a list/array of the longitudes
    lat - a list/array of the latitudes
    """

    outfile.institution = 'Geoscience Australia'
    outfile.description = 'Converted from URS binary C'

    # Longitude
    outfile.createDimension(lon_name, len(lon))
    outfile.createVariable(lon_name, precision, (lon_name,))
    outfile.variables[lon_name].point_spacing = 'uneven'
    outfile.variables[lon_name].units = 'degrees_east'
    outfile.variables[lon_name].assignValue(lon)

    # Latitude
    outfile.createDimension(lat_name, len(lat))
    outfile.createVariable(lat_name, precision, (lat_name,))
    outfile.variables[lat_name].point_spacing = 'uneven'
    outfile.variables[lat_name].units = 'degrees_north'
    outfile.variables[lat_name].assignValue(lat)


##
# @brief Get the longitudes and latitudes of a gridded set of points.
# @param long_lat_dep An array where each row is a (long, lat, quantity) point.
# @return A tuple (long, lat, quantity).
# @note The latitude is the fastest varying dimension - in mux files.
def lon_lat2grid(long_lat_dep):
    """
    Given a list of points that are assumed to be on a grid,
    return the longs and lats of the grid.
    long_lat_dep is an array where each row is a position.
    The first column is longitudes.
    The second column is latitudes.

    The latitude is the fastest varying dimension - in mux files
    """

    LONG = 0
    LAT = 1
    QUANTITY = 2

    long_lat_dep = ensure_numeric(long_lat_dep, num.float)

    num_points = long_lat_dep.shape[0]
    this_rows_long = long_lat_dep[0,LONG]

    # Count the length of unique latitudes
    i = 0
    while i < num_points and long_lat_dep[i,LONG] == this_rows_long:
        i += 1

    # Determine the lats and longs from the grid
    lat = long_lat_dep[:i, LAT]
    long = long_lat_dep[::i, LONG]

    lenlong = len(long)
    lenlat = len(lat)

    msg = 'Input data is not gridded'
    assert num_points % lenlat == 0, msg
    assert num_points % lenlong == 0, msg

    # Test that data is gridded
    for i in range(lenlong):
        msg = 'Data is not gridded. It must be for this operation'
        first = i * lenlat
        last = first + lenlat

        assert num.allclose(long_lat_dep[first:last,LAT], lat), msg
        assert num.allclose(long_lat_dep[first:last,LONG], long[i]), msg

    msg = 'Out of range latitudes/longitudes'
    for l in lat: assert -90 < l < 90, msg
    for l in long: assert -180 < l < 180, msg

    # Change quantity from lat being the fastest varying dimension to
    # long being the fastest varying dimension
    # FIXME - make this faster/do this a better way
    # use numeric transpose, after reshaping the quantity vector
    quantity = num.zeros(num_points, num.float)

    for lat_i, _ in enumerate(lat):
        for long_i, _ in enumerate(long):
            q_index = lat_i*lenlong + long_i
            lld_index = long_i*lenlat + lat_i
            temp = long_lat_dep[lld_index, QUANTITY]
            quantity[q_index] = temp

    return long, lat, quantity


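# Minimal worked example of lon_lat2grid (not called anywhere). A 2x2
# grid with latitude varying fastest: rows are (long, lat, quantity).
# The returned quantity is reordered so longitude varies fastest, i.e.
# [q(lat0,lon0), q(lat0,lon1), q(lat1,lon0), q(lat1,lon1)] = [1, 3, 2, 4].
def _example_lon_lat2grid():
    long_lat_dep = [[150.0, -35.0, 1],
                    [150.0, -34.0, 2],
                    [151.0, -35.0, 3],
                    [151.0, -34.0, 4]]
    longs, lats, quantity = lon_lat2grid(long_lat_dep)
    print longs     # [150.0, 151.0]
    print lats      # [-35.0, -34.0]
    print quantity  # [1.0, 3.0, 2.0, 4.0]

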
################################################################################
# END URS 2 SWW
################################################################################


##
# @brief Return a boundary polygon derived from a points file via alpha shape.
# @param points_file The points file.
# @param minimum_triangle_angle Minimum angle (degrees) for mesh triangles.
# @return The boundary polygon.
def points2polygon(points_file, minimum_triangle_angle=3.0):
    """
    WARNING: This function is not fully working.

    Function to return a polygon returned from alpha shape, given a points
    file.

    WARNING: Alpha shape returns multiple polygons, but this function only
    returns one polygon.
    """

    from anuga.pmesh.mesh import Mesh, importMeshFromFile
    from anuga.shallow_water import Domain
    from anuga.pmesh.mesh_interface import create_mesh_from_regions

    mesh = importMeshFromFile(points_file)
    mesh.auto_segment()
    mesh.exportASCIIsegmentoutlinefile("outline.tsh")
    mesh2 = importMeshFromFile("outline.tsh")
    mesh2.generate_mesh(maximum_triangle_area=1000000000,
                        minimum_triangle_angle=minimum_triangle_angle,
                        verbose=False)
    mesh2.export_mesh_file('outline_meshed.tsh')
    domain = Domain("outline_meshed.tsh", use_cache=False)
    polygon = domain.get_boundary_polygon()
    return polygon


################################################################################

if __name__ == "__main__":
    # Setting umask from config to force permissions for all files and
    # directories created to the same. (It was noticed that "mpirun" doesn't
    # honour the umask set in your .bashrc etc file.)

    from config import umask
    import os
    os.umask(umask)
---|