Changeset 9344 for trunk/anuga_work/development
- Timestamp: Sep 24, 2014, 12:41:57 PM
- Location: trunk/anuga_work/development/gareth/template_scenarios/tsunami
- Files: 2 added, 19 deleted, 2 edited, 8 moved
- Legend: in the diffs below, unmodified lines are unmarked, added lines are prefixed with '+', and removed lines with '-'.
trunk/anuga_work/development/gareth/template_scenarios/tsunami/tsunami3/scripts/run_model.py
(diff from r9341 to r9344)

{{{#!diff
 # Routines to setup the domain
 
-import setup_boundary_conditions
-import setup_rainfall
-import setup_inlets
-import setup_mesh
-import setup_initial_conditions
+from setup import setup_boundary_conditions
+from setup import setup_rainfall
+from setup import setup_inlets
+from setup import setup_mesh
+from setup import setup_initial_conditions
+from setup import raster_outputs
+from setup.prepare_data import PrepareData
 
 # Routines defined by the user
…
 # Functions to make geotifs
 
-import raster_outputs
 
 # Record the time so we can report how long the simulation takes
…
 # Get key data for the simulation
 
-from prepare_data import PrepareData
 project = PrepareData('ANUGA_setup.xls')
 
}}}
trunk/anuga_work/development/gareth/template_scenarios/tsunami/tsunami3/scripts/setup/parse_input_data.py
(diff from r9343 to r9344)

{{{#!diff
 
 """
+
+
 import os
 import xlrd
+import glob
+
 
 class ProjectData(object):
…
         file_extension = os.path.splitext(filename)[1]
 
-        if (file_extension in ['.xls', '.xlsx']):
+        if file_extension in ['.xls', '.xlsx']:
             self.get_data_from_excel(filename)
         else:
             msg = 'File type not supported'
             raise ValueError(msg)
-
 
     def get_data_from_excel(self, filename):
…
 
         # Worksheet names -- define here so they can easily be changed
+
         project_ws = 'project_settings'
         mesh_ws = 'mesh'
…
         # #####################################################################
 
-        self.scenario = data_source.get_var(
-            project_ws, 'scenario', [0, 1], post_process =str)
+        self.scenario = data_source.get_var(project_ws, 'scenario', [0, 1],
+                                            post_process=str)
 
         self.output_basedir = data_source.get_var(
-            project_ws, 'output_base_directory', [0, 1], post_process= str)
+            project_ws, 'output_base_directory', [
+                0, 1], post_process=str)
 
         self.yieldstep = data_source.get_var(project_ws, 'yieldstep', [0, 1],
                                              post_process=float)
 
         self.finaltime = data_source.get_var(project_ws, 'finaltime', [0, 1],
                                              post_process=float)
 
         self.projection_information = data_source.get_var(
-            project_ws, 'projection_information', [0, 1])
+            project_ws, 'projection_information', [
+                0, 1])
 
         # Could be unicode string or float -- if float, we need int
…
         if isinstance(self.projection_information, float):
             self.projection_information = int(self.projection_information)
-        if type(self.projection_information) is unicode:
+        if isinstance(self.projection_information, unicode):
             self.projection_information = str(self.projection_information)
 
-        self.flow_algorithm = data_source.get_var(project_ws, 'flow_algorithm',
-                                                  [0, 1], post_process = str)
+        self.flow_algorithm = data_source.get_var(
+            project_ws, 'flow_algorithm', [
+                0, 1], post_process=str)
 
         # Coerce this to a logical variable
…
         self.use_local_extrapolation_and_flux_updating = \
             data_source.get_var(project_ws,
                                 'use_local_extrapolation_and_flux_updating',
                                 offset=[0, 1], post_process=bool)
 
         self.output_tif_cellsize = data_source.get_var(
             project_ws,
             'output_tif_cellsize',
-            offset=[0, 1],
+            offset=[
+                0,
+                1],
             post_process=float)
 
…
             project_ws,
             'output_tif_bounding_polygon',
-            offset=[0, 1],
+            offset=[
+                0,
+                1],
             post_process=empty_string_to_none)
 
…
             project_ws,
             'max_quantity_update_frequency',
-            offset=[0, 1],
+            offset=[
+                0,
+                1],
             post_process=int)
 
…
             project_ws,
             'max_quantity_collection_starttime',
-            offset=[0, 1],
-            post_process = float)
+            offset=[
+                0,
+                1],
+            post_process=float)
 
         self.store_vertices_uniquely = data_source.get_var(
             project_ws,
             'store_vertices_uniquely',
-            offset=[0, 1],
+            offset=[
+                0,
+                1],
             post_process=bool)
 
…
             project_ws,
             'spatial_text_output_dir',
-            offset = [0, 1],
+            offset=[
+                0,
+                1],
             post_process=str)
 
…
 
         self.bounding_polygon_and_tags_file = data_source.get_var(
-            mesh_ws, 'bounding_polygon', [1, 1], post_process=str)
+            mesh_ws, 'bounding_polygon', [
+                1, 1], post_process=str)
 
         self.default_res = data_source.get_var(mesh_ws, 'bounding_polygon',
-                                               [2, 1], post_process = str)
-
-        # Loop to read the interior regions data
-        # Finish when we hit an empty cell
+                                               [2, 1], post_process=str)
 
         self.interior_regions_data = data_source.get_paired_list(
-            mesh_ws, 'interior_regions', [1, 1],
-            post_process =string_or_float)
+            mesh_ws, 'interior_regions', [
+                1, 1], post_process=string_or_float)
 
         # breaklines, riverwalls, point movement threshold
-
-        self.breakline_files = data_source.get_list(mesh_ws, 'breakline_files',
-                                                    [0, 1], post_process = str)
-
-        self.riverwall_csv_files = data_source.get_list(
-            mesh_ws, 'riverwall_csv_files', [0, 1], post_process = str)
+        # We allow wildcard type names as input files
+
+        breakline_files = data_source.get_list(
+            mesh_ws, 'breakline_files', [
+                0, 1], post_process=str)
+
+        riverwall_csv_files = data_source.get_list(
+            mesh_ws, 'riverwall_csv_files', [
+                0, 1], post_process=str)
+
+        # Allow wildcard names as input for breaklines
+
+        self.breakline_files = []
+        if len(breakline_files) > 0:
+            self.breakline_files = [self.breakline_files + glob.glob(bl_file)
+                                    for bl_file in breakline_files]
+
+        # Allow wildcards for breaklines
+
+        self.riverwall_csv_files = []
+        if len(riverwall_csv_files) > 0:
+            self.riverwall_csv_files = [
+                self.riverwall_csv_files +
+                glob.glob(rw_file) for rw_file in riverwall_csv_files]
 
         self.break_line_intersect_point_movement_threshold = \
             data_source.get_var(mesh_ws, 'breakline_intersection_threshold',
-                                [0, 1], post_process =float)
+                                [0, 1], post_process=float)
 
         self.pt_areas = data_source.get_var(
-            mesh_ws, 'region_areas_file', [0, 1], empty_string_to_none)
+            mesh_ws, 'region_areas_file', [
+                0, 1], empty_string_to_none)
 
         # Check we are only using interior_region OR breakline methods
+
         use_ir = self.interior_regions_data != []
-        use_bl = (
-            (self.breakline_files != []) or\
-            ((self.riverwall_csv_files != []) or\
-            (self.pt_areas is not None))
-            )
-
-        if (use_ir and use_bl):
-            msg = ' In worksheet ' + mesh_ws +\
-                'Cannot have both *interior_regions_data* and *breaklines ' \
+        use_bl = self.breakline_files != [] or self.riverwall_csv_files != [] \
+            or self.pt_areas is not None
+
+        if use_ir and use_bl:
+            msg = ' In worksheet ' + mesh_ws \
+                + 'Cannot have both *interior_regions_data* and *breaklines ' \
                 + '/ riverwalls / region point areas* being non-empty.'
             raise ValueError(msg)
…
 
         self.elevation_data = data_source.get_paired_list(
-            init_ws, 'Elevation', [1, 1], post_process = string_or_float)
+            init_ws, 'Elevation', [
+                1, 1], post_process=string_or_float)
 
         self.friction_data = data_source.get_paired_list(
-            init_ws, 'Friction', [1, 1], post_process = string_or_float)
+            init_ws, 'Friction', [
+                1, 1], post_process=string_or_float)
 
         self.stage_data = data_source.get_paired_list(
-            init_ws, 'Stage', [1, 1], post_process = string_or_float)
+            init_ws, 'Stage', [
+                1, 1], post_process=string_or_float)
 
         self.xmomentum_data = data_source.get_paired_list(
-            init_ws, 'X-velocity_times_depth', [1, 1],
-            post_process =string_or_float)
+            init_ws, 'X-velocity_times_depth',
+            [1, 1], post_process=string_or_float)
 
         self.ymomentum_data = data_source.get_paired_list(
-            init_ws, 'Y-velocity_times_depth', [1, 1],
-            post_process =string_or_float)
+            init_ws, 'Y-velocity_times_depth', [1, 1],
+            post_process=string_or_float)
 
         self.elevation_additions = data_source.get_paired_list(
-            init_add_ws, 'Elevation', [1, 1],
-            post_process =string_or_float)
+            init_add_ws, 'Elevation', [
+                1, 1], post_process=string_or_float)
 
         self.friction_additions = data_source.get_paired_list(
-            init_add_ws, 'Friction', [1, 1],
-            post_process =string_or_float)
+            init_add_ws, 'Friction', [
+                1, 1], post_process=string_or_float)
 
         self.stage_additions = data_source.get_paired_list(
-            init_add_ws, 'Stage', [1, 1],
-            post_process =string_or_float)
+            init_add_ws, 'Stage', [
+                1, 1], post_process=string_or_float)
 
         self.xmomentum_additions = data_source.get_paired_list(
             init_add_ws, 'X-velocity_times_depth', [1, 1],
-            post_process =string_or_float)
+            post_process=string_or_float)
 
         self.ymomentum_additions = data_source.get_paired_list(
             init_add_ws, 'Y-velocity_times_depth', [1, 1],
-            post_process =string_or_float)
+            post_process=string_or_float)
 
         # #####################################################################
…
 
         # This can fail if we don't coerce to str
+
         self.boundary_tags_attribute_name = data_source.get_var(
-            boundary_ws, 'boundary_tags_attribute_name', [0, 1], str)
+            boundary_ws, 'boundary_tags_attribute_name', [
+                0, 1], str)
 
         self.boundary_data = data_source.get_subtable(
-            boundary_ws, 'Boundary_Condition_Tag', [1, 0],
-            post_process = string_or_float)
-
+            boundary_ws, 'Boundary_Condition_Tag', [
+                1, 0], post_process=string_or_float)
 
         # #####################################################################
…
         # #####################################################################
 
-        self.inlet_data = data_source.get_subtable(
-            inlet_ws, 'Inlet_name', [1,0])
+        self.inlet_data = data_source.get_subtable(inlet_ws, 'Inlet_name', [1,
+                                                                            0])
 
         # #####################################################################
…
         # #####################################################################
 
-        self.rain_data = data_source.get_subtable(
-            rain_ws, 'Rainfall', [1, 1])
+        self.rain_data = data_source.get_subtable(rain_ws, 'Rainfall',
+                                                  [1, 1])
 
         # #####################################################################
 
-
-
-
-
-# #############################################################################
+##############################################################################
 #
 # END OF CLASS
 #
-# #############################################################################
+##############################################################################
+
 
 class AnugaXls(object):
…
 
         # Loop through the rows and look for a match in the first column
+
         for i in range(len(sheet_data)):
             if flag in sheet_data[i][0]:
…
 
         if matching_row + offset[0] >= len(sheet_data):
-            return ""
+            return ''
 
         # If the col offset exceeds the data length, return a blank
+
         if offset[1] < len(sheet_data[i]):
             output = sheet_data[i + offset[0]][offset[1]]
…
         flag,
         offset=[0, 0],
-        post_process =None,
+        post_process=None,
     ):
         """Find the cell in the first column of worksheet_name that (partially)
…
             else:
                 cell_x = offset[0] + 1
-                next_row2 = self.get_var( worksheet_name, flag,
-                                          [cell_x, cell_y],
-                                          post_process)
+                next_row2 = self.get_var(
+                    worksheet_name, flag, [
+                        cell_x, cell_y], post_process)
 
             paired_list.append([next_row1, next_row2])
…
             cell_x = offset[0]
             cell_y = counter + offset[1]
-            next_row1 = self.get_var(worksheet_name, flag,
-                                     [cell_x, cell_y],
+            next_row1 = self.get_var(worksheet_name, flag, [cell_x, cell_y],
                                      post_process)
             if next_row1 == '':
…
         flag,
         offset=[0, 0],
-        post_process=None
+        post_process=None,
     ):
         """Find the cell in the first column of worksheet_name that (partially)
         matches 'flag', and read data from a nearby 'sub-table' in the
         spreadsheet as a list of lists.
 
         The 'sub-table' starts at a location
         offset fom the matching cell by offset = [nrow, ncol],
         and finishes when a blank row of the sub-table is found
 
         Rows can have different numbers of non-empty columns, but each row
         ends when its first empty cell is found
         """
 
-        out_list = [ ]
+        out_list = []
 
         counter = -1
         while True:
             counter = counter + 1
             cell_x = offset[0] + counter
             cell_y = offset[1]
-            next_row = self.get_list(worksheet_name, flag,
-                                     [cell_x, cell_y],
+            next_row = self.get_list(worksheet_name, flag, [cell_x, cell_y],
                                      post_process)
             if next_row == []:
…
         return out_list
 
-# #############################################################################
+
+##############################################################################
 #
 # END OF CLASS
 #
-# #############################################################################
-
+##############################################################################
 
 # Functions below here
…
 def string_or_float(value):
     """ If value is unicode, return a value with type string.
         If value is a number, return it with type float
         If value is anything else, return value
     """
-    if type(value) == unicode:
+
+    if isinstance(value, unicode):
         return str(value)
     elif isinstance(value, (int, long, float, complex)):
…
     else:
         return value
-
}}}
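The reworked breakline/riverwall input above accepts shell-style wildcards, expanded with `glob`. For reference, a minimal self-contained sketch of the underlying idiom, with hypothetical patterns; here the per-pattern match lists are flattened with a plain loop:

{{{#!python
import glob

# Expand a list of wildcard patterns into one flat list of matching
# file names (the patterns below are hypothetical).
patterns = ['breaklines/*.csv', 'riverwalls/wall_??.csv']

matched_files = []
for pattern in patterns:
    # glob.glob returns a (possibly empty) list of matching paths
    matched_files += glob.glob(pattern)

print(matched_files)
}}}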
trunk/anuga_work/development/gareth/template_scenarios/tsunami/tsunami3/scripts/setup/prepare_data.py
(diff from r9343 to r9344)

{{{#!diff
 # Local modules
 import user_functions
-from parse_input_data import ProjectData
+from setup.parse_input_data import ProjectData
 
 
+
 class PrepareData(ProjectData):
…
     def __init__(self, filename):
         """Parse the input data then process it for ANUGA
+
         """
         # Get the 'raw' data
         ProjectData.__init__(self, filename)
…
 
         # Hack to override resolution
-        # region_point_areas=[ region_point_areas[i][0:2]+[150*150*0.5] for i in \
+        # region_point_areas=\
+        # [ region_point_areas[i][0:2]+[150*150*0.5] for i in \
         #    range(len(region_point_areas))]
 
…
 
         self.interior_regions = [[su.read_polygon(ir[0]), ir[1]] for ir in
                                  self.interior_regions_data]
 
-        # Deal with intersections in the bounding polygon / breaklines / riverwalls
+        # Deal with intersections in the bounding polygon / breaklines /
+        # riverwalls
 
         (self.bounding_polygon, self.breaklines, self.riverwalls) = \
             su.add_intersections_to_domain_features(
                 self.bounding_polygon,
                 self.breaklines,
                 self.riverwalls,
-                point_movement_threshold=\
-                    self.break_line_intersect_point_movement_threshold,
+                point_movement_threshold=self.break_line_intersect_point_movement_threshold,
                 verbose=True)
…
         #
 
-        if type(self.projection_information) is int:
+        if isinstance(self.projection_information, int):
 
             # projection_information describes a UTM zone
…
                 + str(self.projection_information) \
                 + ' +datum=WGS84 +units=m +no_defs'
-        elif type(self.projection_information) is str:
+        elif isinstance(self.projection_information, str):
             self.proj4string = self.projection_information
         else:
…
 
         self.output_dir = self.output_basedir + 'RUN_' + str(runtime) +\
             '_' + self.scenario
         self.partition_basedir = 'PARTITIONS/'
         self.partition_dir = self.partition_basedir + 'Mesh_' +\
             str(self.mesh_id_hash)
         self.meshname = self.output_dir + '/mesh.tsh'
-
 
     def setup_directory_structure(self):
…
             for i in range(len(self.breaklines)):
                 sav_lines(self.breaklines.values()[i],
-                          filename=self.spatial_text_output_dir + \
+                          filename=self.spatial_text_output_dir +
                           '/breakline_' + str(i) + '.txt')
             if self.riverwalls is not {}:
                 for i in range(len(self.riverwalls)):
                     sav_lines(self.riverwalls.values()[i],
-                              filename=self.spatial_text_output_dir + \
+                              filename=self.spatial_text_output_dir +
                               '/riverwall_' + str(i) + '.txt')
 
             sav_lines(self.bounding_polygon,
                       filename=self.spatial_text_output_dir
                       + '/boundingPoly.txt')
…
             # directory
 
             shutil.copytree(self.spatial_text_output_dir,
-                            self.output_dir + \
+                            self.output_dir +
                             '/' + self.spatial_text_output_dir)
         else:
             pass
}}}
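The projection handling above assembles a proj4 string when an integer UTM zone is supplied. A minimal sketch of that construction; the `'+proj=utm +zone='` prefix is an assumption, since only the tail of the string is visible in the diff:

{{{#!python
# Build a proj4 string from an integer UTM zone number. Only the
# '+datum=WGS84 +units=m +no_defs' tail appears in the diff above;
# the '+proj=utm +zone=' prefix is assumed.
def utm_zone_to_proj4(zone):
    return '+proj=utm +zone=' + str(zone) + ' +datum=WGS84 +units=m +no_defs'

print(utm_zone_to_proj4(56))  # hypothetical zone
}}}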
trunk/anuga_work/development/gareth/template_scenarios/tsunami/tsunami3/scripts/setup/setup_boundary_conditions.py
(diff from r9343 to r9344)

{{{#!diff
                 skip_header=1)
             # Make start time = 0
-            boundary_data[:, 0]-=start_time
+            boundary_data[:, 0] -= start_time
             # Make interpolation function
             stage_time_fun = scipy.interpolate.interp1d(
                 boundary_data[:, 0],
                 boundary_data[:, 1])
             # Make boundary condition
             boundary_function = \
                 asb.Transmissive_n_momentum_zero_t_momentum_set_stage_boundary(
                     domain,
                     stage_time_fun)
 
             boundary_tags_and_conditions[boundary_tag] = \
                 boundary_function
…
             msg = 'Boundary condition type ' + boundary_condition_type +\
                 ' for tag ' + boundary_tag + ' is not implemented'
-
-
 
     # Check that all tags in boundary_info have been set
…
             raise Exception(msg)
 
     # Set the boundary
 
     domain.set_boundary(boundary_tags_and_conditions)
}}}
trunk/anuga_work/development/gareth/template_scenarios/tsunami/tsunami3/scripts/user_functions.py
(diff from r9343 to r9344)

{{{#!diff
 """
 
 import os
 import anuga
 from anuga_parallel import myid, numprocs, finalize, barrier
+import ogr
 
 
…
     """
 
+    if not os.path.exists(shapefile_name):
+        msg = 'Cannot find file ' + shapefile_name
+        raise ValueError(msg)
+
     # Step 1: Read the data
-
-    import ogr
-
     data_source = ogr.Open(shapefile_name)
…
     # by dropping the last point
     if bounding_polygon[0] == bounding_polygon[-1]:
-        bounding_polygon = [bounding_polygon[k] for k in range(len(bounding_polygon)-1)]
+        lbp = len(bounding_polygon)-1
+        bounding_polygon = [bounding_polygon[k] for k in range(lbp)]
 
     for i in range(boundary_segnum - 1):
…
         # starts at the end point of bounding_polygon
 
-        found_match = False # Flag for error if no matching segments found
+        found_match = False  # Flag for error if no matching segments found
 
         for j in range(boundary_segnum):
…
                 'polygon are not identical'
             raise ValueError(msg)
 
         # Remove the last point
         bounding_polygon = [bounding_polygon[k] for k in
…
     # Check that a 'match' was found for every 'i' iteration
     if not found_match:
-        raise Exception , 'Did not find a match'
+        raise Exception('Did not find a match')
 
     return (bounding_polygon, boundary_tags)
}}}
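The added existence check above fails early because `ogr.Open` returns `None`, rather than raising, for a missing or unreadable source. A minimal sketch of that guard pattern; the wrapper function is hypothetical, and on newer GDAL builds the import is `from osgeo import ogr`:

{{{#!python
import os
import ogr  # legacy GDAL binding; newer builds: from osgeo import ogr

def open_shapefile(shapefile_name):
    # Fail early with a clear message if the file is absent
    if not os.path.exists(shapefile_name):
        raise ValueError('Cannot find file ' + shapefile_name)
    data_source = ogr.Open(shapefile_name)
    if data_source is None:
        # ogr.Open signals failure by returning None
        raise ValueError('Could not open ' + shapefile_name)
    return data_source
}}}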