1 | """This module contains various auxiliary function used by pyvolution. |
---|
2 | |
---|
3 | It is also a clearing house for functions that may later earn a module |
---|
4 | of their own. |
---|
5 | """ |
---|
6 | |
---|
7 | import anuga.geometry.polygon |
---|
8 | import sys |
---|
9 | import os |
---|
10 | |
---|
11 | from os import remove, mkdir, access, F_OK, R_OK, W_OK, sep,getcwd |
---|
12 | from os.path import exists, basename, split,join |
---|
13 | from warnings import warn |
---|
14 | from shutil import copy |
---|
15 | |
---|
16 | from anuga.utilities.numerical_tools import ensure_numeric, angle, NAN |
---|
17 | from anuga.file.csv_file import load_csv_as_dict |
---|
18 | |
---|
19 | from math import sqrt, atan, degrees |
---|
20 | |
---|
21 | # FIXME (Ole): Temporary short cuts - |
---|
22 | # FIXME (Ole): remove and update scripts where they are used |
---|
23 | from anuga.utilities.system_tools import get_revision_number |
---|
24 | from anuga.utilities.system_tools import store_version_info |
---|
25 | |
---|
26 | import anuga.utilities.log as log |
---|
27 | |
---|
28 | import numpy as num |
---|
29 | |
---|
30 | |
---|
def file_function(filename,
                  domain=None,
                  quantities=None,
                  interpolation_points=None,
                  time_thinning=1,
                  time_limit=None,
                  verbose=False,
                  use_cache=False,
                  boundary_polygon=None,
                  output_centroids=False):
    from file_function import file_function as file_function_new
    return file_function_new(filename, domain, quantities,
                             interpolation_points, time_thinning,
                             time_limit, verbose, use_cache,
                             boundary_polygon, output_centroids)

def multiple_replace(text, dictionary):
    """Multiple replace of words in text

    text: String to be modified
    dictionary: Mapping of words that are to be substituted

    Python Cookbook 3.14 page 88 and page 90
    http://code.activestate.com/recipes/81330/
    """

    import re

    # Create a regular expression from all of the dictionary keys
    # matching only entire words
    regex = re.compile(r'\b' +
                       r'\b|\b'.join(map(re.escape, dictionary.keys())) +
                       r'\b')

    # For each match, look up the corresponding value in the dictionary
    return regex.sub(lambda match: dictionary[match.group(0)], text)


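# The small sketch below (not part of the original module) illustrates
# multiple_replace: because the pattern anchors each key with \b, only
# whole words are substituted, so 'catalogue' is left untouched.

def _example_multiple_replace():
    """Illustrative only - safe to delete."""
    substitutions = {'cat': 'dog', 'dog': 'cat'}
    text = 'the cat chased the dog past the catalogue'
    result = multiple_replace(text, substitutions)
    assert result == 'the dog chased the cat past the catalogue'
    return result

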
def apply_expression_to_dictionary(expression, dictionary):
    """Apply arbitrary expression to values of dictionary

    Given an expression in terms of the keys, replace key by the
    corresponding values and evaluate.

    expression: Arbitrary, e.g. arithmetic, expression relating keys
                from dictionary.

    dictionary: Mapping between symbols in expression and objects that
                will be evaluated by expression.
                Values in dictionary must support operators given in
                expression e.g. by overloading

    Due to a limitation with Numeric, this can not evaluate 0/0.
    In general, the user can fix this by adding 1e-30 to the numerator.
    SciPy core can handle this situation.
    """

    import re

    assert isinstance(expression, basestring)
    assert isinstance(dictionary, dict)

    # Convert dictionary values to textual representations suitable for eval
    D = {}
    for key in dictionary:
        D[key] = 'dictionary["%s"]' % key

    # Perform substitution of variables
    expression = multiple_replace(expression, D)

    # Evaluate and return
    try:
        return eval(expression)
    except NameError as e:
        msg = 'Expression "%s" could not be evaluated: %s' % (expression, e)
        raise NameError(msg)
    except ValueError as e:
        msg = 'Expression "%s" could not be evaluated: %s' % (expression, e)
        raise ValueError(msg)


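# A brief sketch (not part of the original module) of how the expression
# machinery is typically used: the keys of the dictionary act as variables
# in the expression and may hold anything that supports the operators
# used, e.g. numpy arrays.

def _example_apply_expression():
    """Illustrative only - safe to delete."""
    quantities = {'stage': num.array([1.0, 2.0, 3.0]),
                  'elevation': num.array([0.5, 0.5, 0.5])}
    depth = apply_expression_to_dictionary('stage - elevation', quantities)
    assert num.allclose(depth, [0.5, 1.5, 2.5])
    return depth

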
def get_textual_float(value, format='%.2f'):
    """Get textual representation of floating point numbers
    and accept None as valid entry

    format is a string - default = '%.2f'
    """

    if value is None:
        return 'None'
    else:
        try:
            float(value)
        except (TypeError, ValueError):
            # Maybe this is a vector
            if len(value) > 1:
                s = '('
                for v in value:
                    s += get_textual_float(v, format) + ', '

                s = s[:-2] + ')'    # Strip trailing comma and close
                return s
            else:
                raise Exception('Illegal input to get_textual_float: %s'
                                % str(value))
        else:
            return format % float(value)

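# A short sketch (not part of the original module) showing the scalar,
# None and vector behaviour of get_textual_float.

def _example_get_textual_float():
    """Illustrative only - safe to delete."""
    assert get_textual_float(None) == 'None'
    assert get_textual_float(3.14159) == '3.14'
    assert get_textual_float((1.0, 2.0)) == '(1.00, 2.00)'
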
def get_gauges_from_file(filename):
    """Temporary interface to the new location in gauge.py"""
    # Assumed import, mirroring the other thin wrappers in this module
    # (cf. sww2csv_gauges below); gauge_get_from_file was previously
    # referenced here without being imported.
    from gauge import gauge_get_from_file
    return gauge_get_from_file(filename)


def check_list(quantity):
    """Check that all names in the quantity list are valid quantities.
    """

    all_quantity = ['stage', 'depth', 'momentum', 'xmomentum',
                    'ymomentum', 'speed', 'bearing', 'elevation']

    # convert all quantity names to lowercase
    for i, q in enumerate(quantity):
        quantity[i] = q.lower()

    # check that all names in 'quantity' appear in 'all_quantity'
    p = list(set(quantity).difference(set(all_quantity)))
    if len(p) != 0:
        msg = 'Quantities %s do not exist - please try again' % p
        raise Exception(msg)


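# A small sketch (not part of the original module): check_list lowercases
# the names in place and raises on anything it does not recognise.

def _example_check_list():
    """Illustrative only - safe to delete."""
    names = ['Stage', 'SPEED']
    check_list(names)               # passes; names becomes ['stage', 'speed']
    assert names == ['stage', 'speed']
    try:
        check_list(['vorticity'])   # not a known quantity
    except Exception:
        pass
    else:
        raise AssertionError('expected check_list to raise')

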
def calc_bearing(uh, vh):
    """Calculate velocity bearing from North
    """

    # FIXME (Ole): I reckon we should refactor this one to use
    #              the function angle() in utilities/numerical_tools
    #
    #              It will be a simple matter of
    #              * converting from radians to degrees
    #              * moving the reference direction from [1,0] to North
    #              * changing from counter clockwise to clockwise.

    # if indeterminate, return NAN
    if uh == 0 and vh == 0:
        return NAN

    return degrees(angle([uh, vh], [0, -1]))


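# A quick sketch (not part of the original module) of how calc_bearing is
# called.  The exact compass convention follows angle() in
# utilities/numerical_tools (see the FIXME above); the only guaranteed
# special case is that a zero velocity yields NAN.

def _example_calc_bearing():
    """Illustrative only - safe to delete."""
    import math
    assert math.isnan(calc_bearing(0.0, 0.0))   # indeterminate direction
    bearing = calc_bearing(1.0, 1.0)            # north-easterly flow
    assert 0.0 <= bearing < 360.0

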
def add_directories(root_directory, directories):
    """
    Add the first sub-directory in 'directories' to root_directory.
    Then add the second sub-directory to the accumulating path and so on.

    Return the path of the final directory.

    This is handy for specifying and creating a directory where data will go.
    """
    path = root_directory
    for new_dir in directories:
        path = os.path.join(path, new_dir)
        if not access(path, F_OK):
            mkdir(path)
    return path


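# A minimal usage sketch (not part of the original module), assuming the
# process is allowed to create directories under the current working
# directory.

def _example_add_directories():
    """Illustrative only - safe to delete."""
    target = add_directories(getcwd(), ['data', 'output', 'run1'])
    # target is now <cwd>/data/output/run1 and each level exists on disk
    assert access(target, F_OK)
    return target

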
def store_parameters(verbose=False, **kwargs):
    """Temporary interface to new location"""

    from anuga.shallow_water.data_manager \
         import store_parameters as dm_store_parameters
    log.critical('store_parameters has moved from util.py.')
    log.critical('Please use "from anuga.shallow_water.data_manager '
                 'import store_parameters"')

    return dm_store_parameters(verbose=verbose, **kwargs)


def remove_lone_verts(verts, triangles, number_of_full_nodes=None):
    """Removes vertices that are not associated with any triangles.

    verts is a list/array of points.
    triangles is a list of 3 element tuples.  Each tuple represents a triangle.
    number_of_full_nodes relates to parallelism when a mesh has an
                         extra layer of ghost points.
    """

    verts = ensure_numeric(verts)
    triangles = ensure_numeric(triangles)

    N = len(verts)

    # initialise the array to easily find the index of the first loner
    # ie, if N=3 -> [6,5,4]
    loners = num.arange(2*N, N, -1)
    for t in triangles:
        for vert in t:
            loners[vert] = vert    # all non-loners will have loners[i]=i

    lone_start = 2*N - max(loners)    # The index of the first loner

    if lone_start - 1 == N:
        # no loners
        pass
    elif min(loners[lone_start:N]) > N:
        # All the loners are at the end of the vert array
        verts = verts[0:lone_start]
    else:
        # change the loners list so it can be used to modify triangles
        # Remove the loners from verts
        # Could've used X=compress(less(loners,N),loners)
        # verts=num.take(verts,X,axis=0) to remove the loners from verts
        # but I think it would use more memory
        new_i = lone_start    # point at first loner - 'shuffle down' target
        for i in range(lone_start, N):
            if loners[i] >= N:    # [i] is a loner, leave alone
                pass
            else:                 # a non-loner, move down
                loners[i] = new_i
                verts[new_i] = verts[i]
                new_i += 1
        verts = verts[0:new_i]

    # Modify the triangles
    triangles = num.choose(triangles, loners)
    return verts, triangles


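# A small sketch (not part of the original module): vertex 3 below is not
# referenced by any triangle, so it is dropped and the triangle indices
# are renumbered accordingly.

def _example_remove_lone_verts():
    """Illustrative only - safe to delete."""
    verts = [[0.0, 0.0], [1.0, 0.0], [0.0, 1.0], [5.0, 5.0]]  # last is lone
    triangles = [[0, 1, 2]]
    new_verts, new_triangles = remove_lone_verts(verts, triangles)
    assert len(new_verts) == 3
    assert num.allclose(new_triangles, [[0, 1, 2]])

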
def get_centroid_values(x, triangles):
    """Compute centroid values from vertex values

    x: Values at vertices of triangular mesh
    triangles: Nx3 integer array pointing to vertex information
               for each of the N triangles.  Elements of triangles are
               indices into x
    """

    xc = num.zeros(triangles.shape[0], num.float)    # Space for centroid info

    for k in range(triangles.shape[0]):
        # Indices of vertices
        i0 = triangles[k][0]
        i1 = triangles[k][1]
        i2 = triangles[k][2]

        xc[k] = (x[i0] + x[i1] + x[i2])/3

    return xc


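# A brief sketch (not part of the original module): each centroid value is
# simply the mean of the three vertex values of that triangle.

def _example_get_centroid_values():
    """Illustrative only - safe to delete."""
    x = num.array([0.0, 3.0, 6.0, 9.0])
    triangles = num.array([[0, 1, 2], [1, 2, 3]])
    xc = get_centroid_values(x, triangles)
    assert num.allclose(xc, [3.0, 6.0])

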
def make_plots_from_csv_file(directories_dic={dir: ['gauge', 0, 0]},
                             output_dir='',
                             base_name='',
                             plot_numbers=['3-5'],
                             quantities=['speed', 'stage', 'momentum'],
                             assess_all_csv_files=True,
                             extra_plot_name='test'):

    msg = 'make_plots_from_csv_file has been replaced by ' \
          'csv2timeseries_graphs. '
    msg += 'Please use "from anuga.abstract_2d_finite_volumes.util import ' \
           'csv2timeseries_graphs"'
    raise Exception(msg)


def csv2timeseries_graphs(directories_dic={},
                          output_dir='',
                          base_name=None,
                          plot_numbers='',
                          quantities=['stage'],
                          extra_plot_name='',
                          assess_all_csv_files=True,
                          create_latex=False,
                          verbose=False):
    """
    Read in csv files that have the right header information and
    plot time series such as Stage, Speed, etc.  Will also plot several
    time series on one plot.  Filenames must follow this convention,
    <base_name><plot_number>.csv eg gauge_timeseries3.csv

    NOTE: relies on 'elevation' being present in the csv file!

    Each file represents a location and within each file there are
    time, quantity columns.

    For example:
    if "directories_dic" defines 4 directories and each directory
    contains a csv file matching the given "plot_numbers", this will
    create one plot per quantity, each with 4 lines, one line for
    each directory.

    Usage:
        csv2timeseries_graphs(directories_dic={'slide'+sep:['Slide',0, 0],
                                               'fixed_wave'+sep:['Fixed Wave',0,0]},
                              output_dir='fixed_wave'+sep,
                              base_name='gauge_timeseries_',
                              plot_numbers='',
                              quantities=['stage','speed'],
                              extra_plot_name='',
                              assess_all_csv_files=True,
                              create_latex=False,
                              verbose=True)
    this will create one plot for stage and one for speed, each with
    both 'slide' and 'fixed_wave' lines on it, for every csv file with
    'gauge_timeseries_' as the prefix.  The graphs will be in the output
    directory 'fixed_wave' and the graph axes will be determined by
    assessing all the csv files with that prefix.

    ANOTHER EXAMPLE
        csv2timeseries_graphs(directories_dic={'slide'+sep:['Slide',0, 0],
                                               'fixed_wave'+sep:['Fixed Wave',0,0]},
                              output_dir='fixed_wave'+sep,
                              base_name='gauge_timeseries_',
                              plot_numbers=['1-3'],
                              quantities=['stage','speed'],
                              extra_plot_name='',
                              assess_all_csv_files=False,
                              create_latex=False,
                              verbose=True)
    This will plot csv files called gauge_timeseries_1.csv,
    gauge_timeseries_2.csv and gauge_timeseries_3.csv from both 'slide'
    and 'fixed_wave' directories to 'fixed_wave'.  There will be six
    plots created, three for speed and three for stage, one for each csv
    file.  There will be two lines on each of these plots.  The axes will
    have been determined from only these files; had assess_all_csv_files
    been True, all csv files with the 'gauge_timeseries_' prefix would
    have been assessed.

    ANOTHER EXAMPLE
        csv2timeseries_graphs({'J:'+sep+'anuga_validation'+sep:['new',20,-.1],
                               'J:'+sep+'conical_island'+sep:['test',0,0]},
                              output_dir='',
                              plot_numbers=['1','3'],
                              quantities=['stage','depth','bearing'],
                              base_name='gauge_b',
                              assess_all_csv_files=True,
                              verbose=True)

    This will produce one plot for each quantity (therefore 3) in the
    current directory, each plot will have 2 lines on them.  The first
    plot named 'new' will have the time offset by 20 seconds and the
    stage height adjusted by -0.1m

    Inputs:
        directories_dic: dictionary of directory with values (plot
                         legend name for directory), (start time of
                         the time series) and the (value to add to
                         stage if needed).  For example
                         {dir1:['Anuga_ons',5000, 0],
                          dir2:['b_emoth',5000,1.5],
                          dir3:['b_ons',5000,1.5]}
                         Having multiple directories defined will plot them
                         on one plot, therefore there will be 3 lines on each
                         of these plots.  If you only want one line per plot
                         call csv2timeseries_graphs separately for each
                         directory, eg only have one directory in the
                         'directories_dic' in each call.

        output_dir: directory for the plot outputs.  Only important to
                    define when you have more than one directory in your
                    directories_dic; if you have not defined it and you
                    have multiple directories in 'directories_dic' there
                    will be plots in each directory, however only one
                    directory will contain the complete plots/graphs.

        base_name: Is used a couple of times.
                   1) to find the csv files to be plotted if there is no
                      'plot_numbers' then csv files with 'base_name' are
                      plotted
                   2) in the title of the plots, the length of base_name is
                      removed from the front of the filename to be used in
                      the title.
                   This could be changed if needed.
                   Note: this is ignored if assess_all_csv_files=True

        plot_numbers: a String list of numbers to plot.  For example
                      [0-4,10,15-17] will read and attempt to plot
                      the following: 0,1,2,3,4,10,15,16,17
                      NOTE: if no plot numbers this will create one plot per
                            quantity, per gauge

        quantities: Will get available quantities from the header in the csv
                    file.  Quantities must be one of these.
                    NOTE: ALL QUANTITY NAMES MUST BE lower case!

        extra_plot_name: A string that is appended to the end of the
                         output filename.

        assess_all_csv_files: if true it will read ALL csv files with
                              "base_name", regardless of 'plot_numbers',
                              and determine a uniform set of axes for
                              Stage, Speed and Momentum.  IF FALSE it
                              will only read the csv files within the
                              'plot_numbers'

        create_latex: NOT IMPLEMENTED YET!!  sorry Jane....

    OUTPUTS: saves the plots to
             <output_dir><base_name><plot_number><extra_plot_name>.png
    """

    try:
        import pylab
    except ImportError:
        # ANUGA does not rely on pylab being installed,
        # so only this plotting routine requires it
        msg = 'csv2timeseries_graphs needs pylab to be installed correctly'
        raise Exception(msg)

    from os import sep
    from anuga.utilities.file_utils import get_all_files_with_extension

    seconds_in_hour = 3600
    seconds_in_minutes = 60

    quantities_label = {}
    # quantities_label['time'] = 'time (hours)'
    quantities_label['time'] = 'time (minutes)'
    quantities_label['stage'] = 'wave height (m)'
    quantities_label['speed'] = 'speed (m/s)'
    quantities_label['momentum'] = 'momentum (m^2/sec)'
    quantities_label['depth'] = 'water depth (m)'
    quantities_label['xmomentum'] = 'momentum (m^2/sec)'
    quantities_label['ymomentum'] = 'momentum (m^2/sec)'
    quantities_label['bearing'] = 'degrees (o)'
    quantities_label['elevation'] = 'elevation (m)'

    if extra_plot_name != '':
        extra_plot_name = '_' + extra_plot_name

    new_plot_numbers = []
    # change plot_numbers to list, eg ['0-4','10']
    # to ['0','1','2','3','4','10']
    for i, num_string in enumerate(plot_numbers):
        if '-' in num_string:
            start = int(num_string[:num_string.rfind('-')])
            end = int(num_string[num_string.rfind('-') + 1:]) + 1
            for x in range(start, end):
                new_plot_numbers.append(str(x))
        else:
            new_plot_numbers.append(num_string)

    # find all the files that fit the specs provided and return a list of
    # them so as to help find a uniform max and min for the plots...
    list_filenames = []
    all_csv_filenames = []
    if verbose: log.critical('Determining files to access for axes ranges.')

    for i, directory in enumerate(directories_dic.keys()):
        all_csv_filenames.append(get_all_files_with_extension(directory,
                                                              base_name,
                                                              '.csv'))

        filenames = []
        if plot_numbers == '':
            list_filenames.append(get_all_files_with_extension(directory,
                                                               base_name,
                                                               '.csv'))
        else:
            for number in new_plot_numbers:
                filenames.append(base_name + number)
            list_filenames.append(filenames)

    # use all the files to get the values for the plot axes
    max_start_time = -1000.
    min_start_time = 100000

    if verbose: log.critical('Determining uniform axes')

    # this entire loop is to determine the min and max range for the
    # axes of the plots

    # quantities.insert(0,'elevation')
    quantities.insert(0, 'time')

    directory_quantity_value = {}
    # quantity_value = {}
    min_quantity_value = {}
    max_quantity_value = {}

    for i, directory in enumerate(directories_dic.keys()):
        filename_quantity_value = {}
        if assess_all_csv_files == False:
            which_csv_to_assess = list_filenames[i]
        else:
            # gets list of filenames for directory "i"
            which_csv_to_assess = all_csv_filenames[i]

        for j, filename in enumerate(which_csv_to_assess):
            quantity_value = {}

            dir_filename = join(directory, filename)
            attribute_dic, title_index_dic = load_csv_as_dict(dir_filename
                                                              + '.csv')
            directory_start_time = directories_dic[directory][1]
            directory_add_tide = directories_dic[directory][2]

            if verbose: log.critical('reading: %s.csv' % dir_filename)

            # add time to get values
            for k, quantity in enumerate(quantities):
                quantity_value[quantity] = [float(x) for
                                            x in attribute_dic[quantity]]

                # add tide to stage if provided
                if quantity == 'stage':
                    quantity_value[quantity] = \
                        num.array(quantity_value[quantity], num.float) \
                        + directory_add_tide

                # condition to find max and mins for all the plots
                # populate the list with something when i=0 and j=0 and
                # then compare to the other values to determine abs max
                # and min
                if i == 0 and j == 0:
                    min_quantity_value[quantity], \
                        max_quantity_value[quantity] = \
                            get_min_max_values(quantity_value[quantity])

                    if quantity != 'time':
                        min_quantity_value[quantity] = \
                            min_quantity_value[quantity] * 1.1
                        max_quantity_value[quantity] = \
                            max_quantity_value[quantity] * 1.1
                else:
                    min_val, max_val = \
                        get_min_max_values(quantity_value[quantity])

                    # min_val and max_val are padded by "increase_axis" to
                    # get axes that are slightly bigger than the max and
                    # mins so the plots look good.

                    increase_axis = (max_val - min_val) * 0.05
                    if min_val <= min_quantity_value[quantity]:
                        if quantity == 'time':
                            min_quantity_value[quantity] = min_val
                        else:
                            if round(min_val, 2) == 0.00:
                                min_quantity_value[quantity] = -increase_axis
                                # min_quantity_value[quantity] = -2.
                                # min_quantity_value[quantity] = \
                                #     -max_quantity_value[quantity]*increase_axis
                            else:
                                # min_quantity_value[quantity] = \
                                #     min_val*(1+increase_axis)
                                min_quantity_value[quantity] = \
                                    min_val - increase_axis

                    if max_val > max_quantity_value[quantity]:
                        if quantity == 'time':
                            max_quantity_value[quantity] = max_val
                        else:
                            max_quantity_value[quantity] = \
                                max_val + increase_axis
                            # max_quantity_value[quantity] = \
                            #     max_val*(1+increase_axis)

            # track the earliest and latest start time over all directories
            if min_start_time > directory_start_time:
                min_start_time = directory_start_time
            if max_start_time < directory_start_time:
                max_start_time = directory_start_time

            filename_quantity_value[filename] = quantity_value

        directory_quantity_value[directory] = filename_quantity_value

    # final step to uniform axes for the graphs
    quantities_axis = {}

    for i, quantity in enumerate(quantities):
        quantities_axis[quantity] = (float(min_start_time)
                                         / float(seconds_in_minutes),
                                     (float(max_quantity_value['time'])
                                          + float(max_start_time))
                                         / float(seconds_in_minutes),
                                     min_quantity_value[quantity],
                                     max_quantity_value[quantity])

        if verbose and (quantity != 'time' and quantity != 'elevation'):
            log.critical('axis for quantity %s are x:(%s to %s)%s '
                         'and y:(%s to %s)%s'
                         % (quantity, quantities_axis[quantity][0],
                            quantities_axis[quantity][1],
                            quantities_label['time'],
                            quantities_axis[quantity][2],
                            quantities_axis[quantity][3],
                            quantities_label[quantity]))

    cstr = ['b', 'r', 'g', 'c', 'm', 'y', 'k']

    if verbose: log.critical('Now start to plot')

    i_max = len(directories_dic.keys())
    legend_list_dic = {}
    legend_list = []
    for i, directory in enumerate(directories_dic.keys()):
        if verbose: log.critical('Plotting in %s %s'
                                 % (directory, new_plot_numbers))

        # FIXME THIS SORT IS VERY IMPORTANT
        # Without it the assigned plot numbers may not work correctly
        # there must be a better way
        list_filenames[i].sort()
        for j, filename in enumerate(list_filenames[i]):
            if verbose: log.critical('Starting %s' % filename)

            directory_name = directories_dic[directory][0]
            directory_start_time = directories_dic[directory][1]
            directory_add_tide = directories_dic[directory][2]

            # create an if about the start time and tide height if don't exist
            attribute_dic, title_index_dic = \
                load_csv_as_dict(directory + sep + filename + '.csv')
            # get data from dict in to list
            # do maths to list by changing to array
            t = (num.array(directory_quantity_value[directory][filename]['time'])
                 + directory_start_time) / seconds_in_minutes

            # finds the maximum elevation, used only as a test
            # and as info in the graphs
            elevation = [float(x) for x in attribute_dic["elevation"]]

            min_ele, max_ele = get_min_max_values(elevation)

            if min_ele != max_ele:
                log.critical("Note! Elevation changes in %s"
                             % (directory + sep + filename))

            # creates a dictionary with keys that is the filename and
            # attributes are a list of lists containing 'directory_name'
            # and 'elevation'.  This is used to make the contents for the
            # legends in the graphs, this is the name of the model and the
            # elevation.  All in this great one liner from DG.  If the key
            # 'filename' doesn't exist it creates the entry, if the entry
            # exists it appends to the key.

            legend_list_dic.setdefault(filename, []) \
                           .append([directory_name, round(max_ele, 3)])

            # creates a LIST for the legend on the last iteration of the
            # directories which is when "legend_list_dic" has been fully
            # populated.  Creates a list of strings which is used in the
            # legend.  Only runs on the last iteration for all the
            # gauges(csv) files.  Empties the list before creating it.

            if i == i_max - 1:
                legend_list = []

                for name_and_elevation in legend_list_dic[filename]:
                    legend_list.append('%s (elevation = %sm)'
                                       % (name_and_elevation[0],
                                          name_and_elevation[1]))

            # skip time and elevation so they are not plotted!
            for k, quantity in enumerate(quantities):
                if quantity != 'time' and quantity != 'elevation':
                    pylab.figure(int(k*100 + j))
                    pylab.ylabel(quantities_label[quantity])
                    data = directory_quantity_value[directory]\
                                                   [filename][quantity]
                    pylab.plot(t, data, c=cstr[i], linewidth=1)
                    pylab.xlabel(quantities_label['time'])
                    pylab.axis(quantities_axis[quantity])
                    pylab.legend(legend_list, loc='upper right')

                    pylab.title('%s at %s gauge'
                                % (quantity, filename[len(base_name):]))

                    if output_dir == '':
                        figname = '%s%s%s_%s%s.png' \
                                  % (directory, sep, filename, quantity,
                                     extra_plot_name)
                    else:
                        figname = '%s%s%s_%s%s.png' \
                                  % (output_dir, sep, filename, quantity,
                                     extra_plot_name)

                    if verbose: log.critical('saving figure here %s' % figname)

                    pylab.savefig(figname)

    if verbose: log.critical('Closing all plots')

    pylab.close('all')
    del pylab

    if verbose: log.critical('Finished closing plots')

def get_min_max_values(values=None):
    """
    Return the min and max of the list it was provided.
    """

    if values is None:
        raise Exception('List must be provided')

    return min(values), max(values)


def get_runup_data_for_locations_from_file(gauge_filename,
                                           sww_filename,
                                           runup_filename,
                                           size=10,
                                           verbose=False):
    """this will read a csv file with the header x,y.  Then look in a square
    'size'x2 around this position for the 'max_inundation_height' in the
    'sww_filename' and report the findings in the 'runup_filename'.

    WARNING: NO TESTS!
    """

    from anuga.shallow_water.data_manager import \
        get_maximum_inundation_data

    fid = open(runup_filename, "w")
    fid.write("easting,northing,runup \n ")
    fid.close()

    # read gauge csv file to dictionary
    attribute_dic, title_index_dic = load_csv_as_dict(gauge_filename)
    northing = [float(x) for x in attribute_dic["y"]]
    easting = [float(x) for x in attribute_dic["x"]]

    log.critical('Reading %s' % sww_filename)

    runup_locations = []
    for i, x in enumerate(northing):
        poly = [[int(easting[i] + size), int(northing[i] + size)],
                [int(easting[i] + size), int(northing[i] - size)],
                [int(easting[i] - size), int(northing[i] - size)],
                [int(easting[i] - size), int(northing[i] + size)]]

        run_up, x_y = get_maximum_inundation_data(filename=sww_filename,
                                                  polygon=poly,
                                                  verbose=False)

        # if no runup, report 0 instead of None
        if run_up is None: run_up = 0
        if x_y is None: x_y = [0, 0]

        if verbose:
            log.critical('maximum inundation runup near %s is %s meters'
                         % (x_y, run_up))

        # append the result for this gauge to the output file
        fid = open(runup_filename, "a")
        temp = '%s,%s,%s \n' % (x_y[0], x_y[1], run_up)
        fid.write(temp)
        fid.close()

def sww2csv_gauges(sww_file,
                   gauge_file,
                   out_name='gauge_',
                   quantities=['stage', 'depth', 'elevation',
                               'xmomentum', 'ymomentum'],
                   verbose=False,
                   use_cache=True):
    """
    Interpolate the quantities at a given set of locations, given
    an sww file.  The results are written to a csv file.

    Inputs:
        NOTE: if using csv2timeseries_graphs after creating the csv files,
        it is essential to export the quantities 'depth' and 'elevation':
        'depth' is good for analysing gauges on land, and 'elevation' is
        used automatically by csv2timeseries_graphs in the legend.

        sww_file: path to any sww file

        gauge_file: Assumes that it follows this format
            name, easting, northing, elevation
            point1, 100.3, 50.2, 10.0
            point2, 10.3, 70.3, 78.0

        NOTE: the column order can change but the names eg 'easting',
        'elevation' must be the same!  All lower case!

        out_name: prefix for output file name (default is 'gauge_')

    Outputs:
        one file for each gauge/point location in the points file.  They
        will be named with this format in the same directory as the
        'sww_file'
            <out_name><name>.csv
        eg gauge_point1.csv if <out_name> not supplied
           myfile_2_point1.csv if <out_name>='myfile_2_'

        They will all have a header

    Usage: sww2csv_gauges(sww_file='test1.sww',
                          quantities=['stage', 'elevation', 'depth', 'bearing'],
                          gauge_file='gauge.txt')

    In the future let points be a points file.
    And let the user choose the quantities.

    This is currently quite specific.
    If it needs to be more general, change things.

    This is really returning speed, not velocity.
    """
    from gauge import sww2csv_gauges as sww2csv

    return sww2csv(sww_file, gauge_file, out_name, quantities, verbose,
                   use_cache)

def sww2timeseries(swwfiles,
                   gauge_filename,
                   production_dirs,
                   report=None,
                   reportname=None,
                   plot_quantity=None,
                   generate_fig=False,
                   surface=None,
                   time_min=None,
                   time_max=None,
                   time_thinning=1,
                   time_unit=None,
                   title_on=None,
                   use_cache=False,
                   verbose=False,
                   output_centroids=False):
    from gauge import sww2timeseries as sww2timeseries_new
    return sww2timeseries_new(swwfiles,
                              gauge_filename,
                              production_dirs,
                              report,
                              reportname,
                              plot_quantity,
                              generate_fig,
                              surface,
                              time_min,
                              time_max,
                              time_thinning,
                              time_unit,
                              title_on,
                              use_cache,
                              verbose,
                              output_centroids)

def greens_law(d1, d2, h1, verbose=False):
    """Green's Law

    Green's Law allows an approximation of wave amplitude at
    a given depth based on the fourth root of the ratio of two depths
    and the amplitude at another given depth.

    Note, wave amplitude is equal to stage.

    Inputs:

    d1, d2 - the two depths
    h1     - the wave amplitude at d1
    h2     - the derived amplitude at d2

    h2 = h1 (d1/d2)^(1/4), where d2 cannot equal 0.

    """

    d1 = ensure_numeric(d1)
    d2 = ensure_numeric(d2)
    h1 = ensure_numeric(h1)

    if d1 <= 0.0:
        msg = 'the first depth, d1 (%f), must be strictly positive' % (d1)
        raise Exception(msg)

    if d2 <= 0.0:
        msg = 'the second depth, d2 (%f), must be strictly positive' % (d2)
        raise Exception(msg)

    if h1 <= 0.0:
        msg = 'the wave amplitude, h1 (%f), must be strictly positive' % (h1)
        raise Exception(msg)

    h2 = h1 * (d1/d2)**0.25

    assert h2 > 0

    return h2


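# A worked sketch (not part of the original module): a 1 m amplitude wave
# in 100 m of water shoaling to 1 m depth grows by a factor of
# (100/1)^(1/4) = sqrt(10), i.e. to roughly 3.16 m.

def _example_greens_law():
    """Illustrative only - safe to delete."""
    h2 = greens_law(d1=100.0, d2=1.0, h1=1.0)
    assert abs(h2 - 100.0**0.25) < 1.0e-12    # ~3.1623 m
    return h2

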

def square_root(s):
    return sqrt(s)