[5897] | 1 | """This module contains various auxiliary function used by pyvolution. |
---|
| 2 | |
---|
| 3 | It is also a clearing house for functions that may later earn a module |
---|
| 4 | of their own. |
---|
| 5 | """ |
---|
| 6 | |
---|
[7711] | 7 | import anuga.geometry.polygon |
---|
[5897] | 8 | import sys |
---|
| 9 | import os |
---|
| 10 | |
---|
| 11 | from os import remove, mkdir, access, F_OK, R_OK, W_OK, sep,getcwd |
---|
| 12 | from os.path import exists, basename, split,join |
---|
| 13 | from warnings import warn |
---|
| 14 | from shutil import copy |
---|
| 15 | |
---|
[7695] | 16 | from anuga.utilities.numerical_tools import ensure_numeric, angle, NAN |
---|
[7765] | 17 | from anuga.file.csv_file import load_csv_as_dict |
---|
[7679] | 18 | |
---|
[5897] | 19 | from math import sqrt, atan, degrees |
---|
| 20 | |
---|
[6070] | 21 | # FIXME (Ole): Temporary short cuts - |
---|
| 22 | # FIXME (Ole): remove and update scripts where they are used |
---|
[5897] | 23 | from anuga.utilities.system_tools import get_revision_number |
---|
| 24 | from anuga.utilities.system_tools import store_version_info |
---|
| 25 | |
---|
[7317] | 26 | import anuga.utilities.log as log |
---|
[5897] | 27 | |
---|
[7276] | 28 | import numpy as num |
---|
[6086] | 29 | |
---|
[6145] | 30 | |
---|
def file_function(filename,
                  domain=None,
                  quantities=None,
                  interpolation_points=None,
                  time_thinning=1,
                  time_limit=None,
                  verbose=False,
                  use_cache=False,
                  boundary_polygon=None,
                  output_centroids=False):
    """Compatibility shim: delegate to the relocated file_function.

    The real implementation now lives in the file_function module; this
    wrapper keeps existing imports of util.file_function working.
    All arguments are forwarded unchanged.
    """
    from file_function import file_function as _impl
    return _impl(filename, domain, quantities, interpolation_points,
                 time_thinning, time_limit, verbose, use_cache,
                 boundary_polygon, output_centroids)
---|
[5897] | 45 | |
---|
[6070] | 46 | ## |
---|
| 47 | # @brief Replace multiple substrings in a string. |
---|
| 48 | # @param text The string to operate on. |
---|
| 49 | # @param dictionary A dict containing replacements, key->value. |
---|
| 50 | # @return The new string. |
---|
def multiple_replace(text, dictionary):
    """Multiple replace of words in text.

    text:       String to be modified.
    dictionary: Mapping of words that are to be substituted; only entire
                words (delimited by \\b word boundaries) are replaced.

    Returns the new string.

    Python Cookbook 3.14 page 88 and page 90
    http://code.activestate.com/recipes/81330/
    """

    import re

    # An empty mapping means there is nothing to do.  Without this guard
    # the generated pattern would be r'\b\b', which matches the empty
    # string at every word boundary and makes the replacement lambda fail
    # with KeyError('').
    if not dictionary:
        return text

    # Create a regular expression from all of the dictionary keys,
    # matching only entire words.
    regex = re.compile(r'\b' +
                       r'\b|\b'.join(map(re.escape, dictionary.keys())) +
                       r'\b')

    # For each match, look up the corresponding value in the dictionary.
    return regex.sub(lambda match: dictionary[match.group(0)], text)
---|
| 71 | |
---|
| 72 | |
---|
[6070] | 73 | ## |
---|
| 74 | # @brief Apply arbitrary expressions to the values of a dict. |
---|
| 75 | # @param expression A string expression to apply. |
---|
| 76 | # @param dictionary The dictionary to apply the expression to. |
---|
def apply_expression_to_dictionary(expression, dictionary):
    """Apply arbitrary expression to values of dictionary.

    Given an expression in terms of the keys, replace key by the
    corresponding values and evaluate.

    expression: Arbitrary, e.g. arithmetric, expression relating keys
                from dictionary.

    dictionary: Mapping between symbols in expression and objects that
                will be evaluated by expression.
                Values in dictionary must support operators given in
                expression e.g. by overloading.

    Raises NameError or ValueError (with the offending expression in the
    message) when evaluation fails.

    Due to a limitation with numeric, this can not evaluate 0/0.
    In general, the user can fix by adding 1e-30 to the numerator.
    SciPy core can handle this situation.
    """

    # Resolve the string type in a way that works on both Python 2
    # (basestring covers str and unicode) and Python 3 (str only).
    try:
        string_types = basestring
    except NameError:
        string_types = str

    # isinstance instead of exact type comparison so dict subclasses work.
    assert isinstance(expression, string_types)
    assert isinstance(dictionary, dict)

    # Convert dictionary values to textual representations suitable for eval.
    D = {}
    for key in dictionary:
        D[key] = 'dictionary["%s"]' % key

    # Perform substitution of variables (whole-word replacement only).
    expression = multiple_replace(expression, D)

    # Evaluate and return.
    # NOTE: eval of a caller-supplied expression is inherently unsafe if the
    # expression can come from untrusted input - callers must ensure it is
    # trusted.
    try:
        return eval(expression)
    except NameError as e:
        msg = 'Expression "%s" could not be evaluated: %s' % (expression, e)
        raise NameError(msg)
    except ValueError as e:
        msg = 'Expression "%s" could not be evaluated: %s' % (expression, e)
        raise ValueError(msg)
---|
| 119 | |
---|
[6070] | 120 | |
---|
| 121 | ## |
---|
| 122 | # @brief Format a float into a string. |
---|
| 123 | # @param value Float value to format. |
---|
| 124 | # @param format The format to use (%.2f is default). |
---|
| 125 | # @return The formatted float as a string. |
---|
def get_textual_float(value, format = '%.2f'):
    """Get textual representation of floating point numbers
    and accept None as valid entry.

    value:  a float (or something castable to float), a sequence of more
            than one such value (rendered as '(v1, v2, ...)'), or None.
    format: a format string - default = '%.2f'

    Returns the formatted string, or the string 'None' if value is None.
    Raises ValueError for inputs that are neither castable to float nor
    a sequence of more than one element.
    """

    if value is None:
        return 'None'
    else:
        try:
            float(value)
        except (TypeError, ValueError):
            # Maybe this is a vector - format each component recursively.
            if len(value) > 1:
                s = '('
                for v in value:
                    s += get_textual_float(v, format) + ', '

                s = s[:-2] + ')' # Strip trailing comma and close
                return s
            else:
                # Was a string exception ("raise 'Illegal...'"), which is
                # invalid in Python 3 - raise a proper exception instead.
                msg = 'Illegal input to get_textual_float: %s' % str(value)
                raise ValueError(msg)
        else:
            return format % float(value)
---|
[5897] | 151 | |
---|
[6070] | 152 | ## |
---|
| 153 | # @brief Read gauge info from a file. |
---|
| 154 | # @param filename The name of the file to read. |
---|
| 155 | # @return A (gauges, gaugelocation, elev) tuple. |
---|
def get_gauges_from_file(filename):
    """Read gauge info from a file.

    filename: name of the gauge file to read.

    Delegates entirely to gauge_get_from_file and returns its result
    (per the header comment above, a (gauges, gaugelocation, elev) tuple
    - TODO confirm against the delegate's actual return value).

    NOTE(review): gauge_get_from_file is not imported in this module's
    visible import block, so this call looks like it would raise
    NameError at runtime - verify the import exists (the gauge code was
    relocated around r7679).
    """
    return gauge_get_from_file(filename)
---|
[6070] | 158 | |
---|
| 159 | |
---|
| 160 | ## |
---|
| 161 | # @brief Check that input quantities in quantity list are legal. |
---|
| 162 | # @param quantity Quantity list to check. |
---|
| 163 | # @note Raises an exception of list is not legal. |
---|
def check_list(quantity):
    """Check that input quantities in quantity list are possible.

    quantity: list of quantity name strings; it is lower-cased IN PLACE
              (callers may rely on that mutation).

    Raises Exception naming the unknown quantities if any name is not in
    the legal set.
    """

    all_quantity = ['stage', 'depth', 'momentum', 'xmomentum',
                    'ymomentum', 'speed', 'bearing', 'elevation']

    # convert all quantity names to lowercase (in place)
    for i, name in enumerate(quantity):
        quantity[i] = name.lower()

    # check that all names in 'quantity' appear in 'all_quantity'
    p = list(set(quantity).difference(set(all_quantity)))
    if len(p) != 0:
        msg = 'Quantities %s do not exist - please try again' %p
        # Exception-instance syntax works on Python 2 and 3
        # (was the py2-only "raise Exception, msg" form).
        raise Exception(msg)
---|
| 181 | |
---|
[6070] | 182 | |
---|
| 183 | ## |
---|
| 184 | # @brief Calculate velocity bearing from North. |
---|
| 185 | # @param uh ?? |
---|
| 186 | # @param vh ?? |
---|
| 187 | # @return The calculated bearing. |
---|
def calc_bearing(uh, vh):
    """ Calculate velocity bearing from North.

    uh, vh: components of the velocity (presumably the x/east and y/north
            momentum or velocity components - TODO confirm with callers).

    Returns the bearing in degrees, or NAN when both components are zero
    (the direction is then indeterminate).
    """
    #FIXME (Ole): I reckon we should refactor this one to use
    #             the function angle() in utilities/numerical_tools
    #
    #             It will be a simple matter of
    # * converting from radians to degrees
    # * moving the reference direction from [1,0] to North
    # * changing from counter clockwise to clocwise.

    # if indeterminate, just return
    if uh==0 and vh==0:
        return NAN

    # angle() is measured against the reference vector [0, -1] and the
    # result converted from radians to degrees.
    # NOTE(review): the exact convention (clockwise-from-North?) depends on
    # angle()'s semantics in utilities/numerical_tools - confirm there.
    return degrees(angle([uh, vh], [0, -1]))
---|
[5897] | 204 | |
---|
| 205 | |
---|
[6070] | 206 | ## |
---|
| 207 | # @brief Create a nested sub-directory path. |
---|
| 208 | # @param root_directory The base diretory path. |
---|
| 209 | # @param directories An iterable of sub-directory names. |
---|
| 210 | # @return The final joined directory path. |
---|
| 211 | # @note If each sub-directory doesn't exist, it will be created. |
---|
def add_directories(root_directory, directories):
    """
    Add the first sub-directory in 'directories' to root_directory.
    Then add the second sub-directory to the accumulating path and so on.

    root_directory: the base directory path.
    directories:    an iterable of sub-directory names.

    Return the path of the final directory; each level is created with
    mkdir if it does not already exist.

    This is handy for specifying and creating a directory where data will go.
    """
    # 'path' instead of the original 'dir', which shadowed the builtin.
    path = root_directory
    for sub_dir in directories:
        path = os.path.join(path, sub_dir)
        if not access(path, F_OK):
            mkdir(path)
    return path
---|
| 227 | |
---|
[6070] | 228 | |
---|
| 229 | ## |
---|
| 230 | # @brief |
---|
| 231 | # @param verbose |
---|
| 232 | # @param kwargs |
---|
| 233 | # @return |
---|
| 234 | # @note TEMP |
---|
def store_parameters(verbose=False,**kwargs):
    """Temporary Interface to new location.

    Deprecated: use anuga.shallow_water.data_manager.store_parameters
    directly.  All keyword arguments are forwarded to it.
    """

    from anuga.shallow_water.data_manager \
         import store_parameters as dm_store_parameters
    log.critical('store_parameters has moved from util.py.')
    log.critical('Please use "from anuga.shallow_water.data_manager '
                 'import store_parameters"')

    # Bug fix: the original passed verbose=False literally, silently
    # discarding the caller's verbose flag - forward it instead.
    return dm_store_parameters(verbose=verbose, **kwargs)
---|
| 245 | |
---|
[6070] | 246 | |
---|
| 247 | ## |
---|
| 248 | # @brief Remove vertices that are not associated with any triangle. |
---|
| 249 | # @param verts An iterable (or array) of points. |
---|
| 250 | # @param triangles An iterable of 3 element tuples. |
---|
| 251 | # @param number_of_full_nodes ?? |
---|
| 252 | # @return (verts, triangles) where 'verts' has been updated. |
---|
def remove_lone_verts(verts, triangles, number_of_full_nodes=None):
    """Removes vertices that are not associated with any triangles.

    verts is a list/array of points.
    triangles is a list of 3 element tuples.  Each tuple represents a triangle.
    number_of_full_nodes relate to parallelism when a mesh has an
    extra layer of ghost points.
    (NOTE(review): number_of_full_nodes is accepted but never referenced in
    this function body - confirm whether callers rely on it.)

    Returns (verts, triangles) where lone vertices have been removed from
    verts and the vertex indices in triangles remapped accordingly.
    """

    verts = ensure_numeric(verts)
    triangles = ensure_numeric(triangles)

    N = len(verts)

    # initialise the array to easily find the index of the first loner
    # ie, if N=3 -> [6,5,4]
    # Every vertex starts marked with a sentinel value > N; vertices that
    # are referenced by some triangle get overwritten with their own index
    # in the loop below, so afterwards loners[i] > N <=> vertex i is a loner.
    loners=num.arange(2*N, N, -1)
    for t in triangles:
        for vert in t:
            loners[vert]= vert # all non-loners will have loners[i]=i

    # max(loners) is the sentinel of the lowest-index vertex still marked
    # as a loner, so 2*N - max(loners) recovers that vertex's index.
    lone_start = 2*N - max(loners) # The index of the first loner

    if lone_start-1 == N:
        # no loners
        pass
    elif min(loners[lone_start:N]) > N:
        # All the loners are at the end of the vert array
        verts = verts[0:lone_start]
    else:
        # change the loners list so it can be used to modify triangles
        # Remove the loners from verts
        # Could've used X=compress(less(loners,N),loners)
        # verts=num.take(verts,X,axis=0) to Remove the loners from verts
        # but I think it would use more memory
        new_i = lone_start # point at first loner - 'shuffle down' target
        for i in range(lone_start, N):
            if loners[i] >= N: # [i] is a loner, leave alone
                pass
            else: # a non-loner, move down
                loners[i] = new_i
                verts[new_i] = verts[i]
                new_i += 1
        verts = verts[0:new_i]

    # Modify the triangles
    # After the shuffle, loners[] maps each old vertex index to its new
    # index, so num.choose relabels all triangle vertex references at once.
    triangles = num.choose(triangles,loners)
    return verts, triangles
---|
| 301 | |
---|
[6072] | 302 | |
---|
| 303 | ## |
---|
| 304 | # @brief Compute centroid values from vertex values |
---|
| 305 | # @param x Values at vertices of triangular mesh. |
---|
| 306 | # @param triangles Nx3 integer array pointing to vertex information. |
---|
| 307 | # @return [N] array of centroid values. |
---|
def get_centroid_values(x, triangles):
    """Compute centroid values from vertex values

    x: Values at vertices of triangular mesh
    triangles: Nx3 integer array pointing to vertex information
               for each of the N triangels.  Elements of triangles are
               indices into x

    Returns an [N] float array with the mean of each triangle's three
    vertex values.
    """

    # num.float was a deprecated alias of the builtin float and was removed
    # in NumPy 1.20+; the builtin is equivalent here (float64 array).
    xc = num.zeros(triangles.shape[0], float)    # Space for centroid info

    for k in range(triangles.shape[0]):
        # Indices of vertices
        i0 = triangles[k][0]
        i1 = triangles[k][1]
        i2 = triangles[k][2]

        xc[k] = (x[i0] + x[i1] + x[i2])/3

    return xc
---|
| 328 | |
---|
[6072] | 329 | |
---|
[6070] | 330 | # @note TEMP |
---|
def make_plots_from_csv_file(directories_dic={dir:['gauge', 0, 0]},
                             output_dir='',
                             base_name='',
                             plot_numbers=['3-5'],
                             quantities=['speed','stage','momentum'],
                             assess_all_csv_files=True,
                             extra_plot_name='test'):
    """Deprecated: replaced by csv2timeseries_graphs.

    Always raises Exception directing callers to the replacement; the
    parameters are retained only for signature compatibility.
    """

    msg = 'make_plots_from_csv_file has been replaced by csv2timeseries_graphs '
    msg += 'Please use "from anuga.abstract_2d_finite_volumes.util import ' \
           'csv2timeseries_graphs"'
    # Exception-instance syntax (was the py2-only "raise Exception, msg").
    # The old fall-through call to csv2timeseries_graphs that followed this
    # unconditional raise was unreachable dead code and has been removed.
    raise Exception(msg)
---|
[6072] | 351 | |
---|
| 352 | |
---|
| 353 | ## |
---|
| 354 | # @brief Plot time series from CSV files. |
---|
| 355 | # @param directories_dic |
---|
| 356 | # @param output_dir |
---|
| 357 | # @param base_name |
---|
| 358 | # @param plot_numbers |
---|
| 359 | # @param quantities |
---|
| 360 | # @param extra_plot_name |
---|
| 361 | # @param assess_all_csv_files |
---|
| 362 | # @param create_latex |
---|
| 363 | # @param verbose |
---|
| 364 | # @note Assumes that 'elevation' is in the CSV file(s). |
---|
[5897] | 365 | def csv2timeseries_graphs(directories_dic={}, |
---|
[6072] | 366 | output_dir='', |
---|
| 367 | base_name=None, |
---|
| 368 | plot_numbers='', |
---|
| 369 | quantities=['stage'], |
---|
| 370 | extra_plot_name='', |
---|
| 371 | assess_all_csv_files=True, |
---|
| 372 | create_latex=False, |
---|
| 373 | verbose=False): |
---|
[5897] | 374 | |
---|
| 375 | """ |
---|
| 376 | Read in csv files that have the right header information and |
---|
| 377 | plot time series such as Stage, Speed, etc. Will also plot several |
---|
| 378 | time series on one plot. Filenames must follow this convention, |
---|
| 379 | <base_name><plot_number>.csv eg gauge_timeseries3.csv |
---|
| 380 | |
---|
| 381 | NOTE: relies that 'elevation' is in the csv file! |
---|
| 382 | |
---|
| 383 | Each file represents a location and within each file there are |
---|
| 384 | time, quantity columns. |
---|
| 385 | |
---|
| 386 | For example: |
---|
| 387 | if "directories_dic" defines 4 directories and in each directories |
---|
| 388 | there is a csv files corresponding to the right "plot_numbers", |
---|
| 389 | this will create a plot with 4 lines one for each directory AND |
---|
| 390 | one plot for each "quantities". ??? FIXME: unclear. |
---|
| 391 | |
---|
| 392 | Usage: |
---|
| 393 | csv2timeseries_graphs(directories_dic={'slide'+sep:['Slide',0, 0], |
---|
| 394 | 'fixed_wave'+sep:['Fixed Wave',0,0]}, |
---|
| 395 | output_dir='fixed_wave'+sep, |
---|
| 396 | base_name='gauge_timeseries_', |
---|
| 397 | plot_numbers='', |
---|
| 398 | quantities=['stage','speed'], |
---|
| 399 | extra_plot_name='', |
---|
| 400 | assess_all_csv_files=True, |
---|
| 401 | create_latex=False, |
---|
| 402 | verbose=True) |
---|
| 403 | this will create one plot for stage with both 'slide' and |
---|
| 404 | 'fixed_wave' lines on it for stage and speed for each csv |
---|
| 405 | file with 'gauge_timeseries_' as the prefix. The graghs |
---|
| 406 | will be in the output directory 'fixed_wave' and the graph |
---|
| 407 | axis will be determined by assessing all the |
---|
| 408 | |
---|
| 409 | ANOTHER EXAMPLE |
---|
| 410 | new_csv2timeseries_graphs(directories_dic={'slide'+sep:['Slide',0, 0], |
---|
| 411 | 'fixed_wave'+sep:['Fixed Wave',0,0]}, |
---|
| 412 | output_dir='fixed_wave'+sep, |
---|
| 413 | base_name='gauge_timeseries_', |
---|
| 414 | plot_numbers=['1-3'], |
---|
| 415 | quantities=['stage','speed'], |
---|
| 416 | extra_plot_name='', |
---|
| 417 | assess_all_csv_files=False, |
---|
| 418 | create_latex=False, |
---|
| 419 | verbose=True) |
---|
| 420 | This will plot csv files called gauge_timeseries_1.csv and |
---|
| 421 | gauge_timeseries3.csv from both 'slide' and 'fixed_wave' directories |
---|
| 422 | to 'fixed_wave'. There will be 4 plots created two speed and two stage |
---|
| 423 | one for each csv file. There will be two lines on each of these plots. |
---|
| 424 | And the axis will have been determined from only these files, had |
---|
| 425 | assess_all_csv_files = True all csv file with 'gauges_timeseries_' prefix |
---|
| 426 | would of been assessed. |
---|
| 427 | |
---|
| 428 | ANOTHER EXAMPLE |
---|
| 429 | csv2timeseries_graphs({'J:'+sep+'anuga_validation'+sep:['new',20,-.1], |
---|
| 430 | 'J:'+sep+'conical_island'+sep:['test',0,0]}, |
---|
| 431 | output_dir='', |
---|
| 432 | plot_numbers=['1','3'], |
---|
| 433 | quantities=['stage','depth','bearing'], |
---|
| 434 | base_name='gauge_b', |
---|
| 435 | assess_all_csv_files=True, |
---|
| 436 | verbose=True) |
---|
| 437 | |
---|
[6072] | 438 | This will produce one plot for each quantity (therefore 3) in the |
---|
| 439 | current directory, each plot will have 2 lines on them. The first |
---|
| 440 | plot named 'new' will have the time offseted by 20secs and the stage |
---|
| 441 | height adjusted by -0.1m |
---|
[5897] | 442 | |
---|
| 443 | Inputs: |
---|
| 444 | directories_dic: dictionary of directory with values (plot |
---|
| 445 | legend name for directory), (start time of |
---|
| 446 | the time series) and the (value to add to |
---|
| 447 | stage if needed). For example |
---|
[6072] | 448 | {dir1:['Anuga_ons',5000, 0], |
---|
| 449 | dir2:['b_emoth',5000,1.5], |
---|
| 450 | dir3:['b_ons',5000,1.5]} |
---|
[5897] | 451 | Having multiple directories defined will plot them on |
---|
[6072] | 452 | one plot, therefore there will be 3 lines on each of |
---|
| 453 | these plot. If you only want one line per plot call |
---|
| 454 | csv2timeseries_graph separately for each directory, |
---|
| 455 | eg only have one directory in the 'directories_dic' in |
---|
| 456 | each call. |
---|
[5897] | 457 | |
---|
[6072] | 458 | output_dir: directory for the plot outputs. Only important to define when |
---|
| 459 | you have more than one directory in your directories_dic, if |
---|
| 460 | you have not defined it and you have multiple directories in |
---|
| 461 | 'directories_dic' there will be plots in each directory, |
---|
| 462 | however only one directory will contain the complete |
---|
| 463 | plot/graphs. |
---|
[5897] | 464 | |
---|
[6072] | 465 | base_name: Is used a couple of times. |
---|
| 466 | 1) to find the csv files to be plotted if there is no |
---|
| 467 | 'plot_numbers' then csv files with 'base_name' are plotted |
---|
| 468 | 2) in the title of the plots, the length of base_name is |
---|
| 469 | removed from the front of the filename to be used in the |
---|
| 470 | title. |
---|
[5897] | 471 | This could be changed if needed. |
---|
| 472 | Note is ignored if assess_all_csv_files=True |
---|
| 473 | |
---|
| 474 | plot_numbers: a String list of numbers to plot. For example |
---|
| 475 | [0-4,10,15-17] will read and attempt to plot |
---|
[6072] | 476 | the follow 0,1,2,3,4,10,15,16,17 |
---|
| 477 | NOTE: if no plot numbers this will create one plot per |
---|
| 478 | quantity, per gauge |
---|
| 479 | |
---|
| 480 | quantities: Will get available quantities from the header in the csv |
---|
| 481 | file. Quantities must be one of these. |
---|
[5897] | 482 | NOTE: ALL QUANTITY NAMES MUST BE lower case! |
---|
| 483 | |
---|
| 484 | extra_plot_name: A string that is appended to the end of the |
---|
| 485 | output filename. |
---|
| 486 | |
---|
| 487 | assess_all_csv_files: if true it will read ALL csv file with |
---|
| 488 | "base_name", regardless of 'plot_numbers' |
---|
| 489 | and determine a uniform set of axes for |
---|
| 490 | Stage, Speed and Momentum. IF FALSE it |
---|
| 491 | will only read the csv file within the |
---|
| 492 | 'plot_numbers' |
---|
| 493 | |
---|
| 494 | create_latex: NOT IMPLEMENTED YET!! sorry Jane.... |
---|
| 495 | |
---|
| 496 | OUTPUTS: saves the plots to |
---|
| 497 | <output_dir><base_name><plot_number><extra_plot_name>.png |
---|
| 498 | """ |
---|
[6072] | 499 | |
---|
[5897] | 500 | try: |
---|
| 501 | import pylab |
---|
| 502 | except ImportError: |
---|
| 503 | msg='csv2timeseries_graphs needs pylab to be installed correctly' |
---|
| 504 | raise msg |
---|
| 505 | #ANUGA don't need pylab to work so the system doesn't |
---|
| 506 | #rely on pylab being installed |
---|
| 507 | return |
---|
| 508 | |
---|
| 509 | from os import sep |
---|
[7765] | 510 | from anuga.utilities.file_utils import get_all_files_with_extension |
---|
[5897] | 511 | |
---|
| 512 | seconds_in_hour = 3600 |
---|
| 513 | seconds_in_minutes = 60 |
---|
| 514 | |
---|
| 515 | quantities_label={} |
---|
| 516 | # quantities_label['time'] = 'time (hours)' |
---|
| 517 | quantities_label['time'] = 'time (minutes)' |
---|
| 518 | quantities_label['stage'] = 'wave height (m)' |
---|
| 519 | quantities_label['speed'] = 'speed (m/s)' |
---|
| 520 | quantities_label['momentum'] = 'momentum (m^2/sec)' |
---|
| 521 | quantities_label['depth'] = 'water depth (m)' |
---|
| 522 | quantities_label['xmomentum'] = 'momentum (m^2/sec)' |
---|
| 523 | quantities_label['ymomentum'] = 'momentum (m^2/sec)' |
---|
| 524 | quantities_label['bearing'] = 'degrees (o)' |
---|
| 525 | quantities_label['elevation'] = 'elevation (m)' |
---|
| 526 | |
---|
| 527 | if extra_plot_name != '': |
---|
[6072] | 528 | extra_plot_name = '_' + extra_plot_name |
---|
[5897] | 529 | |
---|
| 530 | new_plot_numbers=[] |
---|
| 531 | #change plot_numbers to list, eg ['0-4','10'] |
---|
| 532 | #to ['0','1','2','3','4','10'] |
---|
| 533 | for i, num_string in enumerate(plot_numbers): |
---|
| 534 | if '-' in num_string: |
---|
| 535 | start = int(num_string[:num_string.rfind('-')]) |
---|
[6072] | 536 | end = int(num_string[num_string.rfind('-') + 1:]) + 1 |
---|
[5897] | 537 | for x in range(start, end): |
---|
| 538 | new_plot_numbers.append(str(x)) |
---|
| 539 | else: |
---|
| 540 | new_plot_numbers.append(num_string) |
---|
| 541 | |
---|
| 542 | #finds all the files that fit the specs provided and return a list of them |
---|
| 543 | #so to help find a uniform max and min for the plots... |
---|
| 544 | list_filenames=[] |
---|
| 545 | all_csv_filenames=[] |
---|
[7317] | 546 | if verbose: log.critical('Determining files to access for axes ranges.') |
---|
[5897] | 547 | |
---|
| 548 | for i,directory in enumerate(directories_dic.keys()): |
---|
| 549 | all_csv_filenames.append(get_all_files_with_extension(directory, |
---|
[6072] | 550 | base_name, '.csv')) |
---|
[5897] | 551 | |
---|
| 552 | filenames=[] |
---|
| 553 | if plot_numbers == '': |
---|
| 554 | list_filenames.append(get_all_files_with_extension(directory, |
---|
[6072] | 555 | base_name,'.csv')) |
---|
[5897] | 556 | else: |
---|
| 557 | for number in new_plot_numbers: |
---|
[6072] | 558 | filenames.append(base_name + number) |
---|
[5897] | 559 | list_filenames.append(filenames) |
---|
| 560 | |
---|
| 561 | #use all the files to get the values for the plot axis |
---|
| 562 | max_start_time= -1000. |
---|
| 563 | min_start_time = 100000 |
---|
| 564 | |
---|
[7317] | 565 | if verbose: log.critical('Determining uniform axes') |
---|
[6072] | 566 | |
---|
[5897] | 567 | #this entire loop is to determine the min and max range for the |
---|
| 568 | #axes of the plots |
---|
| 569 | |
---|
| 570 | # quantities.insert(0,'elevation') |
---|
| 571 | quantities.insert(0,'time') |
---|
| 572 | |
---|
| 573 | directory_quantity_value={} |
---|
| 574 | # quantity_value={} |
---|
| 575 | min_quantity_value={} |
---|
| 576 | max_quantity_value={} |
---|
| 577 | |
---|
| 578 | for i, directory in enumerate(directories_dic.keys()): |
---|
[6072] | 579 | filename_quantity_value = {} |
---|
| 580 | if assess_all_csv_files == False: |
---|
[5897] | 581 | which_csv_to_assess = list_filenames[i] |
---|
| 582 | else: |
---|
| 583 | #gets list of filenames for directory "i" |
---|
| 584 | which_csv_to_assess = all_csv_filenames[i] |
---|
| 585 | |
---|
| 586 | for j, filename in enumerate(which_csv_to_assess): |
---|
[6072] | 587 | quantity_value = {} |
---|
[5897] | 588 | |
---|
[6072] | 589 | dir_filename = join(directory,filename) |
---|
[7737] | 590 | attribute_dic, title_index_dic = load_csv_as_dict(dir_filename + '.csv') |
---|
[5897] | 591 | directory_start_time = directories_dic[directory][1] |
---|
| 592 | directory_add_tide = directories_dic[directory][2] |
---|
| 593 | |
---|
[7317] | 594 | if verbose: log.critical('reading: %s.csv' % dir_filename) |
---|
[6072] | 595 | |
---|
[5897] | 596 | #add time to get values |
---|
| 597 | for k, quantity in enumerate(quantities): |
---|
[6072] | 598 | quantity_value[quantity] = [float(x) for |
---|
| 599 | x in attribute_dic[quantity]] |
---|
[5897] | 600 | |
---|
| 601 | #add tide to stage if provided |
---|
| 602 | if quantity == 'stage': |
---|
[7276] | 603 | quantity_value[quantity] = num.array(quantity_value[quantity], |
---|
| 604 | num.float) + directory_add_tide |
---|
[5897] | 605 | |
---|
| 606 | #condition to find max and mins for all the plots |
---|
| 607 | # populate the list with something when i=0 and j=0 and |
---|
| 608 | # then compare to the other values to determine abs max and min |
---|
| 609 | if i==0 and j==0: |
---|
| 610 | min_quantity_value[quantity], \ |
---|
[6072] | 611 | max_quantity_value[quantity] = \ |
---|
| 612 | get_min_max_values(quantity_value[quantity]) |
---|
[5897] | 613 | |
---|
| 614 | if quantity != 'time': |
---|
[6072] | 615 | min_quantity_value[quantity] = \ |
---|
| 616 | min_quantity_value[quantity] *1.1 |
---|
| 617 | max_quantity_value[quantity] = \ |
---|
| 618 | max_quantity_value[quantity] *1.1 |
---|
[5897] | 619 | else: |
---|
| 620 | min, max = get_min_max_values(quantity_value[quantity]) |
---|
| 621 | |
---|
[6072] | 622 | # min and max are multipled by "1+increase_axis" to get axes |
---|
| 623 | # that are slighty bigger than the max and mins |
---|
| 624 | # so the plots look good. |
---|
[5897] | 625 | |
---|
| 626 | increase_axis = (max-min)*0.05 |
---|
[6072] | 627 | if min <= min_quantity_value[quantity]: |
---|
[5897] | 628 | if quantity == 'time': |
---|
[6072] | 629 | min_quantity_value[quantity] = min |
---|
[5897] | 630 | else: |
---|
| 631 | if round(min,2) == 0.00: |
---|
[6072] | 632 | min_quantity_value[quantity] = -increase_axis |
---|
| 633 | # min_quantity_value[quantity] = -2. |
---|
| 634 | #min_quantity_value[quantity] = \ |
---|
| 635 | # -max_quantity_value[quantity]*increase_axis |
---|
[5897] | 636 | else: |
---|
[6072] | 637 | # min_quantity_value[quantity] = \ |
---|
| 638 | # min*(1+increase_axis) |
---|
[5897] | 639 | min_quantity_value[quantity]=min-increase_axis |
---|
| 640 | |
---|
[6072] | 641 | if max > max_quantity_value[quantity]: |
---|
[5897] | 642 | if quantity == 'time': |
---|
[6072] | 643 | max_quantity_value[quantity] = max |
---|
[5897] | 644 | else: |
---|
[6072] | 645 | max_quantity_value[quantity] = max + increase_axis |
---|
[5897] | 646 | # max_quantity_value[quantity]=max*(1+increase_axis) |
---|
| 647 | |
---|
| 648 | #set the time... ??? |
---|
| 649 | if min_start_time > directory_start_time: |
---|
| 650 | min_start_time = directory_start_time |
---|
| 651 | if max_start_time < directory_start_time: |
---|
| 652 | max_start_time = directory_start_time |
---|
| 653 | |
---|
| 654 | filename_quantity_value[filename]=quantity_value |
---|
| 655 | |
---|
| 656 | directory_quantity_value[directory]=filename_quantity_value |
---|
| 657 | |
---|
| 658 | #final step to unifrom axis for the graphs |
---|
| 659 | quantities_axis={} |
---|
| 660 | |
---|
| 661 | for i, quantity in enumerate(quantities): |
---|
[6072] | 662 | quantities_axis[quantity] = (float(min_start_time) \ |
---|
| 663 | / float(seconds_in_minutes), |
---|
| 664 | (float(max_quantity_value['time']) \ |
---|
| 665 | + float(max_start_time)) \ |
---|
| 666 | / float(seconds_in_minutes), |
---|
| 667 | min_quantity_value[quantity], |
---|
| 668 | max_quantity_value[quantity]) |
---|
| 669 | |
---|
[5897] | 670 | if verbose and (quantity != 'time' and quantity != 'elevation'): |
---|
[7317] | 671 | log.critical('axis for quantity %s are x:(%s to %s)%s ' |
---|
| 672 | 'and y:(%s to %s)%s' |
---|
| 673 | % (quantity, quantities_axis[quantity][0], |
---|
| 674 | quantities_axis[quantity][1], |
---|
| 675 | quantities_label['time'], |
---|
| 676 | quantities_axis[quantity][2], |
---|
| 677 | quantities_axis[quantity][3], |
---|
| 678 | quantities_label[quantity])) |
---|
[5897] | 679 | |
---|
| 680 | cstr = ['b', 'r', 'g', 'c', 'm', 'y', 'k'] |
---|
| 681 | |
---|
[7317] | 682 | if verbose: log.critical('Now start to plot') |
---|
[5897] | 683 | |
---|
| 684 | i_max = len(directories_dic.keys()) |
---|
[6072] | 685 | legend_list_dic = {} |
---|
| 686 | legend_list = [] |
---|
[5897] | 687 | for i, directory in enumerate(directories_dic.keys()): |
---|
[7317] | 688 | if verbose: log.critical('Plotting in %s %s' |
---|
| 689 | % (directory, new_plot_numbers)) |
---|
[6072] | 690 | |
---|
| 691 | # FIXME THIS SORT IS VERY IMPORTANT |
---|
| 692 | # Without it the assigned plot numbers may not work correctly |
---|
| 693 | # there must be a better way |
---|
[5897] | 694 | list_filenames[i].sort() |
---|
| 695 | for j, filename in enumerate(list_filenames[i]): |
---|
[7317] | 696 | if verbose: log.critical('Starting %s' % filename) |
---|
[6072] | 697 | |
---|
[5897] | 698 | directory_name = directories_dic[directory][0] |
---|
| 699 | directory_start_time = directories_dic[directory][1] |
---|
| 700 | directory_add_tide = directories_dic[directory][2] |
---|
| 701 | |
---|
[6072] | 702 | # create an if about the start time and tide height if don't exist |
---|
[7737] | 703 | attribute_dic, title_index_dic = load_csv_as_dict(directory + sep |
---|
[6072] | 704 | + filename + '.csv') |
---|
[5897] | 705 | #get data from dict in to list |
---|
| 706 | #do maths to list by changing to array |
---|
[6145] | 707 | t = (num.array(directory_quantity_value[directory][filename]['time']) |
---|
[6072] | 708 | + directory_start_time) / seconds_in_minutes |
---|
[5897] | 709 | |
---|
| 710 | #finds the maximum elevation, used only as a test |
---|
| 711 | # and as info in the graphs |
---|
| 712 | max_ele=-100000 |
---|
| 713 | min_ele=100000 |
---|
| 714 | elevation = [float(x) for x in attribute_dic["elevation"]] |
---|
| 715 | |
---|
| 716 | min_ele, max_ele = get_min_max_values(elevation) |
---|
| 717 | |
---|
| 718 | if min_ele != max_ele: |
---|
[7317] | 719 | log.critical("Note! Elevation changes in %s" % dir_filename) |
---|
[5897] | 720 | |
---|
[6072] | 721 | # creates a dictionary with keys that is the filename and attributes |
---|
| 722 | # are a list of lists containing 'directory_name' and 'elevation'. |
---|
| 723 | # This is used to make the contents for the legends in the graphs, |
---|
| 724 | # this is the name of the model and the elevation. All in this |
---|
| 725 | # great one liner from DG. If the key 'filename' doesn't exist it |
---|
| 726 | # creates the entry if the entry exist it appends to the key. |
---|
[5897] | 727 | |
---|
[6072] | 728 | legend_list_dic.setdefault(filename,[]) \ |
---|
| 729 | .append([directory_name, round(max_ele, 3)]) |
---|
| 730 | |
---|
| 731 | # creates a LIST for the legend on the last iteration of the |
---|
| 732 | # directories which is when "legend_list_dic" has been fully |
---|
| 733 | # populated. Creates a list of strings which is used in the legend |
---|
[5897] | 734 | # only runs on the last iteration for all the gauges(csv) files |
---|
| 735 | # empties the list before creating it |
---|
[6072] | 736 | |
---|
| 737 | if i == i_max - 1: |
---|
| 738 | legend_list = [] |
---|
[5897] | 739 | |
---|
| 740 | for name_and_elevation in legend_list_dic[filename]: |
---|
[6072] | 741 | legend_list.append('%s (elevation = %sm)'\ |
---|
| 742 | % (name_and_elevation[0], |
---|
| 743 | name_and_elevation[1])) |
---|
[5897] | 744 | |
---|
| 745 | #skip time and elevation so it is not plotted! |
---|
| 746 | for k, quantity in enumerate(quantities): |
---|
| 747 | if quantity != 'time' and quantity != 'elevation': |
---|
[6171] | 748 | pylab.figure(int(k*100+j)) |
---|
[5897] | 749 | pylab.ylabel(quantities_label[quantity]) |
---|
[6072] | 750 | pylab.plot(t, |
---|
| 751 | directory_quantity_value[directory]\ |
---|
| 752 | [filename][quantity], |
---|
| 753 | c = cstr[i], linewidth=1) |
---|
[5897] | 754 | pylab.xlabel(quantities_label['time']) |
---|
| 755 | pylab.axis(quantities_axis[quantity]) |
---|
| 756 | pylab.legend(legend_list,loc='upper right') |
---|
| 757 | |
---|
[6072] | 758 | pylab.title('%s at %s gauge' |
---|
| 759 | % (quantity, filename[len(base_name):])) |
---|
| 760 | |
---|
[5897] | 761 | if output_dir == '': |
---|
[6072] | 762 | figname = '%s%s%s_%s%s.png' \ |
---|
| 763 | % (directory, sep, filename, quantity, |
---|
| 764 | extra_plot_name) |
---|
[5897] | 765 | else: |
---|
[6072] | 766 | figname = '%s%s%s_%s%s.png' \ |
---|
| 767 | % (output_dir, sep, filename, quantity, |
---|
| 768 | extra_plot_name) |
---|
| 769 | |
---|
[7317] | 770 | if verbose: log.critical('saving figure here %s' % figname) |
---|
[6072] | 771 | |
---|
[5897] | 772 | pylab.savefig(figname) |
---|
| 773 | |
---|
[7317] | 774 | if verbose: log.critical('Closing all plots') |
---|
[6072] | 775 | |
---|
[5897] | 776 | pylab.close('all') |
---|
| 777 | del pylab |
---|
[6072] | 778 | |
---|
[7317] | 779 | if verbose: log.critical('Finished closing plots') |
---|
[5897] | 780 | |
---|
##
# @brief Return min and max of an iterable.
# @param list The iterable to return min & max of.
# @return (min, max) of 'list'.
def get_min_max_values(list=None):
    """Return the minimum and maximum of the supplied iterable.

    The parameter is named 'list' for backward compatibility with
    existing keyword callers, even though it shadows the builtin.

    Note: if no iterable is supplied a critical message is logged and
    the subsequent min()/max() call raises TypeError.
    """

    # 'is None' is the correct identity test (was '== None').
    if list is None: log.critical('List must be provided')

    return min(list), max(list)
| 793 | |
---|
| 794 | |
---|
##
# @brief Get runup around each point listed in a CSV gauge file.
# @param gauge_filename Gauge CSV file name (header must contain 'x' and 'y').
# @param sww_filename SWW file name searched for maximum inundation.
# @param runup_filename Name of CSV file to report results into.
# @param size Half-width (model units) of the square search region.
# @param verbose True if this function is to be verbose.
def get_runup_data_for_locations_from_file(gauge_filename,
                                           sww_filename,
                                           runup_filename,
                                           size=10,
                                           verbose=False):
    """this will read a csv file with the header x,y. Then look in a square
    'size'x2 around this position for the 'max_inundaiton_height' in the
    'sww_filename' and report the findings in the 'runup_filename'.

    WARNING: NO TESTS!
    """

    from anuga.shallow_water.data_manager import \
        get_maximum_inundation_data

    # Write the header; 'with' guarantees the handle is closed even on error.
    with open(runup_filename, "w") as outfile:
        outfile.write("easting,northing,runup \n ")

    # read gauge csv file to dictionary
    attribute_dic, title_index_dic = load_csv_as_dict(gauge_filename)
    northing = [float(x) for x in attribute_dic["y"]]
    easting = [float(x) for x in attribute_dic["x"]]

    log.critical('Reading %s' % sww_filename)

    runup_locations = []
    for i, x in enumerate(northing):
        # Square search polygon of half-width 'size' centred on the gauge.
        poly = [[int(easting[i]+size), int(northing[i]+size)],
                [int(easting[i]+size), int(northing[i]-size)],
                [int(easting[i]-size), int(northing[i]-size)],
                [int(easting[i]-size), int(northing[i]+size)]]

        run_up, x_y = get_maximum_inundation_data(filename=sww_filename,
                                                  polygon=poly,
                                                  verbose=False)

        # if no runup will return 0 instead of NONE
        # ('is None' replaces the '==' identity comparisons)
        if run_up is None: run_up = 0
        if x_y is None: x_y = [0, 0]

        if verbose:
            log.critical('maximum inundation runup near %s is %s meters'
                         % (x_y, run_up))

        # Re-opened in append mode per gauge (as in the original) so partial
        # results survive if a later gauge fails; 'with' closes the handle.
        with open(runup_filename, "a") as outfile:
            outfile.write('%s,%s,%s \n' % (x_y[0], x_y[1], run_up))
| 852 | |
---|
##
# @brief Deprecated shim: extract gauge time series from an SWW file.
# @param sww_file Path to any sww file.
# @param gauge_file CSV file of gauge locations.
# @param out_name Prefix for output file names.
# @param quantities Quantities to export.
# @param verbose True if this function is to be verbose.
# @param use_cache True if caching is to be used.
def sww2csv_gauges(sww_file,
                   gauge_file,
                   out_name='gauge_',
                   quantities=['stage', 'depth', 'elevation',
                               'xmomentum', 'ymomentum'],
                   verbose=False,
                   use_cache=True):
    """Interpolate quantities from an sww file at gauge locations and
    write one CSV time series per gauge.

    NOTE: if using csv2timeseries_graphs after creating the csv files,
    it is essential to export the quantities 'depth' and 'elevation':
    'depth' is useful for gauges on land, and 'elevation' is used
    automatically by csv2timeseries_graphs in the legend.

    gauge_file is assumed to follow this format (column order may vary
    but the lowercase names 'easting', 'northing', 'name', 'elevation'
    must match):

        name, easting, northing, elevation
        point1, 100.3, 50.2, 10.0
        point2, 10.3, 70.3, 78.0

    One output file is written per gauge, in the same directory as
    'sww_file', named <out_name><name>.csv — e.g. gauge_point1.csv when
    <out_name> is not supplied, or myfile_2_point1.csv when
    <out_name>='myfile_2_'. Each file carries a header row.

    Usage: sww2csv_gauges(sww_file='test1.sww',
                          quantities=['stage', 'elevation', 'depth', 'bearing'],
                          gauge_file='gauge.txt')

    This is currently quite specific and really returns speed, not
    velocity. The implementation now lives in the gauge module; this
    wrapper only forwards the call.
    """
    from gauge import sww2csv_gauges as _impl

    return _impl(sww_file, gauge_file, out_name,
                 quantities, verbose, use_cache)
[7685] | 916 | |
---|
def sww2timeseries(swwfiles,
                   gauge_filename,
                   production_dirs,
                   report=None,
                   reportname=None,
                   plot_quantity=None,
                   generate_fig=False,
                   surface=None,
                   time_min=None,
                   time_max=None,
                   time_thinning=1,
                   time_unit=None,
                   title_on=None,
                   use_cache=False,
                   verbose=False,
                   output_centroids=False):
    """Deprecated shim: the implementation now lives in the gauge
    module. Forwards all arguments unchanged and returns its result.
    """
    from gauge import sww2timeseries as _impl

    return _impl(swwfiles, gauge_filename, production_dirs, report,
                 reportname, plot_quantity, generate_fig, surface,
                 time_min, time_max, time_thinning, time_unit,
                 title_on, use_cache, verbose, output_centroids)
| 950 | |
---|
##
# @brief Get a wave height at a certain depth given wave height at another depth.
# @param d1 The first depth.
# @param d2 The second depth.
# @param h1 Wave ampitude at d1
# @param verbose True if this function is to be verbose.
# @return The wave height at d2.
def greens_law(d1, d2, h1, verbose=False):
    """Green's Law

    Approximate the wave amplitude at depth d2 from the amplitude h1 at
    depth d1, via the fourth root of the ratio of the two depths:

        h2 = h1 (d1/d2)^(1/4), where d2 cannot equal 0.

    Note, wave amplitude is equal to stage.

    Inputs:

    d1, d2 - the two depths
    h1 - the wave amplitude at d1

    Returns h2, the derived amplitude at d2. All three inputs must be
    strictly positive, otherwise an Exception is raised.
    """

    d1 = ensure_numeric(d1)
    d2 = ensure_numeric(d2)
    h1 = ensure_numeric(h1)

    # Validate each input in turn; message templates match the
    # historical error strings exactly.
    checks = ((d1, 'the first depth, d1 (%f), must be strictly positive'),
              (d2, 'the second depth, d2 (%f), must be strictly positive'),
              (h1, 'the wave amplitude, h1 (%f), must be strictly positive'))
    for value, template in checks:
        if value <= 0.0:
            raise Exception(template % (value,))

    h2 = h1 * (d1 / d2) ** 0.25

    assert h2 > 0

    return h2
| 998 | |
---|
| 999 | |
---|
##
# @brief Get the square-root of a value.
# @param s The value to get the square-root of.
# @return The square-root of 's'.
def square_root(s):
    """Return the square root of 's' (thin wrapper around math.sqrt)."""
    root = sqrt(s)
    return root
[6072] | 1006 | |
---|
| 1007 | |
---|