Changeset 7765

- Timestamp: Jun 1, 2010, 2:24:36 PM (15 years ago)
- Location: trunk/anuga_core/source/anuga
- Files: 2 added, 5 deleted, 13 edited
trunk/anuga_core/source/anuga/__init__.py (r7751 → r7765)

Around line 11, the convenience re-exports are removed from the package root:

    - # Make selected classes available directly
    -
    - # Boundaries specific to shallow water domain.
    - from anuga.shallow_water.boundaries import Reflective_boundary,\
    -      Transmissive_momentum_set_stage_boundary,\
    -      Dirichlet_discharge_boundary,\
    -      Field_boundary,\
    -      Transmissive_stage_zero_momentum_boundary,\
    -      Transmissive_n_momentum_zero_t_momentum_set_stage_boundary
    -
    - # Boundaries generic across all forms of domain.
    - from anuga.abstract_2d_finite_volumes.generic_boundary_conditions\
    -      import Transmissive_boundary, Dirichlet_boundary, \
    -             Time_boundary, File_boundary, AWI_boundary
    -
    - # Shallow water domain is the standard
    - from anuga.shallow_water.shallow_water_domain import Domain
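With the re-exports gone, user scripts have to name the defining modules
explicitly. A minimal sketch of post-change usage (module paths taken
verbatim from the removed block above):

    from anuga.shallow_water.shallow_water_domain import Domain
    from anuga.shallow_water.boundaries import Reflective_boundary
    from anuga.abstract_2d_finite_volumes.generic_boundary_conditions \
        import Dirichlet_boundary, Time_boundary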
trunk/anuga_core/source/anuga/abstract_2d_finite_volumes/util.py (r7751 → r7765)

Around line 15, load_csv_as_dict is imported from its new home:

      from anuga.utilities.numerical_tools import ensure_numeric, angle, NAN
    - from anuga.utilities.file_utils import load_csv_as_dict
    + from anuga.file.csv_file import load_csv_as_dict

      from math import sqrt, atan, degrees

Around line 630:

      from os import sep
    - from anuga.utilities.file_utils import load_csv_as_dict, \
    -      get_all_files_with_extension
    + from anuga.utilities.file_utils import get_all_files_with_extension

      seconds_in_hour = 3600
trunk/anuga_core/source/anuga/compile_all.py (r7616 → r7765)

Around line 14, the new 'file' package is added to the directories whose
extensions get compiled:

      os.chdir('..')
      os.chdir('abstract_2d_finite_volumes')
    + execfile('..' + os.sep + 'utilities' + os.sep + 'compile.py')
    +
    + os.chdir('..')
    + os.chdir('file')
      execfile('..' + os.sep + 'utilities' + os.sep + 'compile.py')
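The driver's pattern is simply "cd into each package directory, then
execfile the shared utilities/compile.py". The same steps written as a
loop (a sketch only; the real script repeats the chdir/execfile pair
inline, and the directory list here is abridged):

    import os

    for pkg in ['abstract_2d_finite_volumes', 'file']:   # abridged list
        os.chdir('..')
        os.chdir(pkg)
        execfile('..' + os.sep + 'utilities' + os.sep + 'compile.py')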
trunk/anuga_core/source/anuga/damage_modelling/exposure.py (r7743 → r7765)

Around line 4, the same import move:

      DataMissingValuesError

    - from anuga.utilities.file_utils import load_csv_as_dict
    + from anuga.file.csv_file import load_csv_as_dict

      from anuga.geospatial_data.geospatial_data import Geospatial_data,\
trunk/anuga_core/source/anuga/file_conversion/__init__.py (r7758 → r7765)

Around line 8, the convenience re-exports (including the now-deleted
urs2nc) are removed:

      sys.path += __path__

    -
    - # commonly used imports
    - from anuga.file_conversion.file_conversion import sww2obj, dat2obj, \
    -                     timefile2netcdf, tsh2sww, urs2sww, urs2nc
    -
    - from anuga.file_conversion.dem2pts import dem2pts
    - from anuga.file_conversion.esri2sww import esri2sww
    - from anuga.file_conversion.sww2dem import sww2dem
    - from anuga.file_conversion.asc2dem import asc2dem
    - from anuga.file_conversion.ferret2sww import ferret2sww
trunk/anuga_core/source/anuga/file_conversion/file_conversion.py (r7758 → r7765)

Around line 405, urs2nc and its helper _binary_c2nc are deleted:

    - ##
    - # @brief Convert 3 URS files back to 4 NC files.
    - # @param basename_in Stem of the input filenames.
    - # @param basename_out Stem of the output filenames.
    - # @note The name of the urs file names must be:
    - #           [basename_in]-z-mux
    - #           [basename_in]-e-mux
    - #           [basename_in]-n-mux
    - def urs2nc(basename_in='o', basename_out='urs'):
    -     """Convert the 3 urs files to 4 nc files.
    -
    -     The name of the urs file names must be;
    -     [basename_in]-z-mux
    -     [basename_in]-e-mux
    -     [basename_in]-n-mux
    -     """
    -
    -     files_in = [basename_in + WAVEHEIGHT_MUX_LABEL,
    -                 basename_in + EAST_VELOCITY_LABEL,
    -                 basename_in + NORTH_VELOCITY_LABEL]
    -     files_out = [basename_out + '_ha.nc',
    -                  basename_out + '_ua.nc',
    -                  basename_out + '_va.nc']
    -     quantities = ['HA', 'UA', 'VA']
    -
    -     #if os.access(files_in[0]+'.mux', os.F_OK) == 0 :
    -     for i, file_name in enumerate(files_in):
    -         if os.access(file_name, os.F_OK) == 0:
    -             if os.access(file_name + '.mux', os.F_OK) == 0 :
    -                 msg = 'File %s does not exist or is not accessible' % file_name
    -                 raise IOError, msg
    -             else:
    -                 files_in[i] += '.mux'
    -                 log.critical("file_name %s" % file_name)
    -
    -     hashed_elevation = None
    -     for file_in, file_out, quantity in map(None, files_in,
    -                                            files_out,
    -                                            quantities):
    -         lonlatdep, lon, lat, depth = _binary_c2nc(file_in,
    -                                                   file_out,
    -                                                   quantity)
    -         if hashed_elevation == None:
    -             elevation_file = basename_out + '_e.nc'
    -             write_elevation_nc(elevation_file,
    -                                lon,
    -                                lat,
    -                                depth)
    -             hashed_elevation = myhash(lonlatdep)
    -         else:
    -             msg = "The elevation information in the mux files is inconsistent"
    -             assert hashed_elevation == myhash(lonlatdep), msg
    -
    -     files_out.append(elevation_file)
    -
    -     return files_out
    -
    -
    - ##
    - # @brief Convert a quantity URS file to a NetCDF file.
    - # @param file_in Path to input URS file.
    - # @param file_out Path to the output file.
    - # @param quantity Name of the quantity to be written to the output file.
    - # @return A tuple (lonlatdep, lon, lat, depth).
    - def _binary_c2nc(file_in, file_out, quantity):
    -     """Reads in a quantity urs file and writes a quantity nc file.
    -     Additionally, returns the depth and lat, long info,
    -     so it can be written to a file.
    -     """
    -
    -     columns = 3              # long, lat, depth
    -     mux_file = open(file_in, 'rb')
    -
    -     # Number of points/stations
    -     (points_num,) = unpack('i', mux_file.read(4))
    -
    -     # nt, int - Number of time steps
    -     (time_step_count,) = unpack('i', mux_file.read(4))
    -
    -     # dt, float - time step, seconds
    -     (time_step,) = unpack('f', mux_file.read(4))
    -
    -     msg = "Bad data in the mux file."
    -     if points_num < 0:
    -         mux_file.close()
    -         raise ANUGAError, msg
    -     if time_step_count < 0:
    -         mux_file.close()
    -         raise ANUGAError, msg
    -     if time_step < 0:
    -         mux_file.close()
    -         raise ANUGAError, msg
    -
    -     lonlatdep = p_array.array('f')
    -     lonlatdep.read(mux_file, columns * points_num)
    -     lonlatdep = num.array(lonlatdep, dtype=num.float)
    -     lonlatdep = num.reshape(lonlatdep, (points_num, columns))
    -
    -     lon, lat, depth = lon_lat2grid(lonlatdep)
    -     lon_sorted = list(lon)
    -     lon_sorted.sort()
    -
    -     if not num.alltrue(lon == lon_sorted):
    -         msg = "Longitudes in mux file are not in ascending order"
    -         raise IOError, msg
    -
    -     lat_sorted = list(lat)
    -     lat_sorted.sort()
    -
    -     nc_file = Write_nc(quantity,
    -                        file_out,
    -                        time_step_count,
    -                        time_step,
    -                        lon,
    -                        lat)
    -
    -     for i in range(time_step_count):
    -         # Read in a time slice from mux file
    -         hz_p_array = p_array.array('f')
    -         hz_p_array.read(mux_file, points_num)
    -         hz_p = num.array(hz_p_array, dtype=num.float)
    -         hz_p = num.reshape(hz_p, (len(lon), len(lat)))
    -         hz_p = num.transpose(hz_p)  # mux has lat varying fastest, nc has long v.f.
    -
    -         # write time slice to nc file
    -         nc_file.store_timestep(hz_p)
    -
    -     mux_file.close()
    -     nc_file.close()
    -
    -     return lonlatdep, lon, lat, depth

      ##
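The deleted reader documents the mux binary layout: an int station count,
an int timestep count and a float timestep size, followed by
(lon, lat, depth) triples and per-timestep slices as 32-bit floats. A
minimal header-reading sketch distilled from the removed code (the helper
name is hypothetical, not part of ANUGA):

    from struct import unpack

    def read_mux_header(path):
        """Return (points_num, time_step_count, time_step) from a mux file."""
        mux_file = open(path, 'rb')
        (points_num,) = unpack('i', mux_file.read(4))       # stations
        (time_step_count,) = unpack('i', mux_file.read(4))  # nt
        (time_step,) = unpack('f', mux_file.read(4))        # dt, seconds
        mux_file.close()
        return points_num, time_step_count, time_step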
trunk/anuga_core/source/anuga/file_conversion/urs2sts.py (r7761 → r7765)

Around line 1, the implicit relative import is replaced by explicit imports
from the new anuga.file package, plus numpy and the coordinate/NetCDF
helpers:

    - from mux import WAVEHEIGHT_MUX2_LABEL, EAST_VELOCITY_MUX2_LABEL, \
    + import numpy as num
    +
    + # ANUGA modules
    + from anuga.config import netcdf_mode_r, netcdf_mode_w, netcdf_mode_a, \
    +                          netcdf_float
    + from anuga.utilities.numerical_tools import ensure_numeric
    + from anuga.coordinate_transforms.geo_reference import Geo_reference, \
    +      write_NetCDF_georeference, ensure_geo_reference
    +
    + # local modules
    + from anuga.file.mux import read_mux2_py
    + from anuga.file.mux import WAVEHEIGHT_MUX2_LABEL, EAST_VELOCITY_MUX2_LABEL, \
                                 NORTH_VELOCITY_MUX2_LABEL
    + from anuga.file.sts import Write_sts

      ##
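For orientation, a usage sketch of the converter with the keyword arguments
exercised by this changeset's test suite (the input/output stems are
placeholders; treat this as illustrative, not a full API description):

    from anuga.file_conversion.urs2sts import urs2sts

    # Combine two mux2 sources with equal weights into combined.sts
    urs2sts(['sourceA', 'sourceB'],     # input file stems (placeholders)
            basename_out='combined',
            weights=[1.0, 1.0],
            mean_stage=0.0,
            verbose=False)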
trunk/anuga_core/source/anuga/shallow_water/boundaries.py (r7735 → r7765)

Whitespace cleanups in the two __repr__ methods.

Around line 129:

      def __repr__(self):
          """ Return a representation of this instance. """
    -     return 'Transmissive_momentum_set_stage_boundary(%s)' %self.domain
    +     return 'Transmissive_momentum_set_stage_boundary(%s)' % self.domain

Around line 210:

      def __repr__(self):
          """ Return a representation of this instance. """
    -     msg ='Transmissive_n_momentum_zero_t_momentum_set_stage_boundary'
    -     msg +='(%s)' %self.domain
    +     msg = 'Transmissive_n_momentum_zero_t_momentum_set_stage_boundary'
    +     msg += '(%s)' % self.domain
          return msg
trunk/anuga_core/source/anuga/shallow_water/data_manager.py (r7759 → r7765)

Around line 94, load_csv_as_dict is imported from its new home:

      from anuga.utilities.file_utils import create_filename,\
    -                     get_all_swwfiles, load_csv_as_dict
    +                     get_all_swwfiles
    + from anuga.file.csv_file import load_csv_as_dict
      from sww_file import Read_sww, Write_sww

Around line 455, the obsolete (and untested) get_timeseries is deleted:

      files_out.append(file_out)
      return files_out

    -
    - ##
    - # @brief
    - # @param production_dirs
    - # @param output_dir
    - # @param scenario_name
    - # @param gauges_dir_name
    - # @param plot_quantity
    - # @param generate_fig
    - # @param reportname
    - # @param surface
    - # @param time_min
    - # @param time_max
    - # @param title_on
    - # @param verbose
    - # @param nodes
    - def get_timeseries(production_dirs, output_dir, scenario_name, gauges_dir_name,
    -                    plot_quantity, generate_fig=False,
    -                    reportname=None, surface=False, time_min=None,
    -                    time_max=None, title_on=False, verbose=True,
    -                    nodes=None):
    -     """
    -     nodes - number of processes used.
    -
    -     warning - this function has no tests
    -     """
    -
    -     if reportname == None:
    -         report = False
    -     else:
    -         report = True
    -
    -     if nodes is None:
    -         is_parallel = False
    -     else:
    -         is_parallel = True
    -
    -     # Generate figures
    -     swwfiles = {}
    -     if is_parallel is True:
    -         for i in range(nodes):
    -             log.critical('Sending node %d of %d' % (i, nodes))
    -             swwfiles = {}
    -             if not reportname == None:
    -                 reportname = report_name + '_%s' % i
    -             for label_id in production_dirs.keys():
    -                 if label_id == 'boundaries':
    -                     swwfile = best_boundary_sww
    -                 else:
    -                     file_loc = output_dir + label_id + sep
    -                     sww_extra = '_P%s_%s' % (i, nodes)
    -                     swwfile = file_loc + scenario_name + sww_extra + '.sww'
    -                     log.critical('swwfile %s' % swwfile)
    -                     swwfiles[swwfile] = label_id
    -
    -             texname, elev_output = sww2timeseries(swwfiles,
    -                                                   gauges_dir_name,
    -                                                   production_dirs,
    -                                                   report=report,
    -                                                   reportname=reportname,
    -                                                   plot_quantity=plot_quantity,
    -                                                   generate_fig=generate_fig,
    -                                                   surface=surface,
    -                                                   time_min=time_min,
    -                                                   time_max=time_max,
    -                                                   title_on=title_on,
    -                                                   verbose=verbose)
    -     else:
    -         for label_id in production_dirs.keys():
    -             if label_id == 'boundaries':
    -                 log.critical('boundaries')
    -                 file_loc = project.boundaries_in_dir
    -                 swwfile = project.boundaries_dir_name3 + '.sww'
    -                 # swwfile = boundary_dir_filename
    -             else:
    -                 file_loc = output_dir + label_id + sep
    -                 swwfile = file_loc + scenario_name + '.sww'
    -             swwfiles[swwfile] = label_id
    -
    -         texname, elev_output = sww2timeseries(swwfiles,
    -                                               gauges_dir_name,
    -                                               production_dirs,
    -                                               report=report,
    -                                               reportname=reportname,
    -                                               plot_quantity=plot_quantity,
    -                                               generate_fig=generate_fig,
    -                                               surface=surface,
    -                                               time_min=time_min,
    -                                               time_max=time_max,
    -                                               title_on=title_on,
    -                                               verbose=verbose)
    -
    -
    - ################################################################################
    - # End of obsolete functions
    - ################################################################################

Around line 1545, the "READ MUX2 FILES" section (read_mux2_py and
read_time_from_mux, now in the new anuga.file.mux module) and the unused
get_data_from_file are deleted:

    - ################################################################################
    - # READ MUX2 FILES line of points
    - ################################################################################
    -
    - WAVEHEIGHT_MUX2_LABEL = '-z-mux2'
    - EAST_VELOCITY_MUX2_LABEL = '-e-mux2'
    - NORTH_VELOCITY_MUX2_LABEL = '-n-mux2'
    -
    - ##
    - # @brief
    - # @param filenames List of mux2 format input filenames.
    - # @param weights Weights associated with each source file.
    - # @param permutation The gauge numbers for which data is extracted.
    - # @param verbose True if this function is to be verbose.
    - # @return (times, latitudes, longitudes, elevation, quantity, starttime)
    - def read_mux2_py(filenames,
    -                  weights=None,
    -                  permutation=None,
    -                  verbose=False):
    -     """Access the mux files specified in the filenames list. Combine the
    -     data found therin as a weighted linear sum as specifed by the weights.
    -     If permutation is None or empty extract timeseries data for all gauges
    -     within the files.
    -
    -     Input:
    -         filenames:   List of filenames specifiying the file containing the
    -                      timeseries data (mux2 format) for each source
    -         weights:     Weighs associated with each source
    -                      (defaults to 1 for each source)
    -         permutation: Specifies the gauge numbers that for which data is to be
    -                      extracted
    -     """
    -
    -     from urs_ext import read_mux2
    -
    -     numSrc = len(filenames)
    -
    -     file_params = -1 * num.ones(3, num.float)    # [nsta,dt,nt]
    -
    -     # Convert verbose to int C flag
    -     if verbose:
    -         verbose = 1
    -     else:
    -         verbose = 0
    -
    -     if weights is None:
    -         weights = num.ones(numSrc)
    -
    -     if permutation is None:
    -         permutation = ensure_numeric([], num.float)
    -
    -     # Call underlying C implementation urs2sts_ext.c
    -     data = read_mux2(numSrc, filenames, weights, file_params,
    -                      permutation, verbose)
    -
    -     msg = 'File parameter values were not read in correctly from c file'
    -     assert len(num.compress(file_params > 0, file_params)) != 0, msg
    -
    -     msg = 'The number of stations specifed in the c array and in the file ' \
    -           'are inconsistent'
    -     assert file_params[0] >= len(permutation), msg
    -
    -     msg = 'The number of stations returned is inconsistent with ' \
    -           'the requested number'
    -     assert len(permutation) == 0 or len(permutation) == data.shape[0], msg
    -
    -     nsta = int(file_params[0])
    -     msg = 'Must have at least one station'
    -     assert nsta > 0, msg
    -
    -     dt = file_params[1]
    -     msg = 'Must have a postive timestep'
    -     assert dt > 0, msg
    -
    -     nt = int(file_params[2])
    -     msg = 'Must have at least one gauge value'
    -     assert nt > 0, msg
    -
    -     OFFSET = 5    # Number of site parameters p passed back with data
    -                   # p = [geolat,geolon,depth,start_tstep,finish_tstep]
    -
    -     # FIXME (Ole): What is the relationship with params and data.shape ?
    -     # It looks as if the following asserts should pass but they don't always
    -     #
    -     #msg = 'nt = %d, data.shape[1] == %d' %(nt, data.shape[1])
    -     #assert nt == data.shape[1] - OFFSET, msg
    -     #
    -     #msg = 'nsta = %d, data.shape[0] == %d' %(nsta, data.shape[0])
    -     #assert nsta == data.shape[0], msg
    -
    -     # Number of stations in ordering file
    -     number_of_selected_stations = data.shape[0]
    -
    -     # Index where data ends and parameters begin
    -     parameters_index = data.shape[1] - OFFSET
    -
    -     times = dt * num.arange(parameters_index)
    -     latitudes = num.zeros(number_of_selected_stations, num.float)
    -     longitudes = num.zeros(number_of_selected_stations, num.float)
    -     elevation = num.zeros(number_of_selected_stations, num.float)
    -     quantity = num.zeros((number_of_selected_stations, parameters_index), num.float)
    -
    -     starttime = 1e16
    -     for i in range(number_of_selected_stations):
    -         quantity[i][:] = data[i][:parameters_index]
    -         latitudes[i] = data[i][parameters_index]
    -         longitudes[i] = data[i][parameters_index+1]
    -         elevation[i] = -data[i][parameters_index+2]
    -         first_time_step = data[i][parameters_index+3]
    -         starttime = min(dt*first_time_step, starttime)
    -
    -     return times, latitudes, longitudes, elevation, quantity, starttime
    -
    -
    - ##
    - # @brief ??
    - # @param mux_times ??
    - # @param mint ??
    - # @param maxt ??
    - # @return ??
    - def read_time_from_mux(mux_times, mint, maxt):
    -     """
    -     """
    -
    -     if mint == None:
    -         mux_times_start_i = 0
    -     else:
    -         mux_times_start_i = num.searchsorted(mux_times, mint)
    -
    -     if maxt == None:
    -         mux_times_fin_i = len(mux_times)
    -     else:
    -         maxt += 0.5    # so if you specify a time where there is
    -                        # data that time will be included
    -         mux_times_fin_i = num.searchsorted(mux_times, maxt)
    -
    -     return mux_times_start_i, mux_times_fin_i
    -
    -
    - ##
    - # @brief Get data from a text file.
    - # @param filename Path to file to read.
    - # @param separator_value String to split header line with
    - # @return (header_fields, data), header_fields is a list of fields from header,
    - #         data is an array (N columns x M lines) of data from the file.
    - def get_data_from_file(filename, separator_value=','):
    -     """
    -     Read in data information from file and
    -
    -     Returns:
    -         header_fields, a string? of the first line separated
    -         by the 'separator_value'
    -
    -         data, an array (N data columns X M lines) in the file
    -         excluding the header
    -
    -     NOTE: won't deal with columns with different lengths and there must be
    -           no blank lines at the end.
    -     """
    -
    -     fid = open(filename)
    -     lines = fid.readlines()
    -     fid.close()
    -
    -     header_line = lines[0]
    -     header_fields = header_line.split(separator_value)
    -
    -     # array to store data, number in there is to allow float...
    -     # i'm sure there is a better way!
    -     data = num.array([], dtype=num.float)
    -     data = num.resize(data, ((len(lines)-1), len(header_fields)))
    -
    -     array_number = 0
    -     line_number = 1
    -     while line_number < len(lines):
    -         for i in range(len(header_fields)):
    -             # this get line below the header, explaining the +1
    -             # and also the line_number can be used as the array index
    -             fields = lines[line_number].split(separator_value)
    -             # assign to array
    -             data[array_number,i] = float(fields[i])
    -
    -         line_number = line_number + 1
    -         array_number = array_number + 1
    -
    -     return header_fields, data
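read_mux2_py keeps its signature in its new home, anuga.file.mux (see the
urs2sts.py imports above). A hedged call sketch based on the deleted
docstring, which describes the result as a weighted linear sum over the
sources (file stems are placeholders):

    import numpy as num
    from anuga.file.mux import read_mux2_py

    # A None/empty permutation means "extract all gauges" per the docstring
    times, latitudes, longitudes, elevation, quantity, starttime = \
        read_mux2_py(['sourceA-z-mux2', 'sourceB-z-mux2'],
                     weights=num.ones(2))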
trunk/anuga_core/source/anuga/shallow_water/eqf_v2.py (r7276 → r7765)

Mostly modernisation and layout cleanup: the deprecated "<>" operator
becomes "!=", unused imports go, and long lines are wrapped.

Around line 32, a docstring is added:

      dip, x0=0.0, y0=0.0, slip=1.0, rake=90.,\
      domain=None, verbose=False):
    + """ return a function representing a tsunami event
    + """

      from math import sin, radians

Around line 114:

    - from math import sin, cos, radians, exp, cosh
    + from math import sin, cos, radians
      #from okada import okadatest

Around line 154:

      for i in range(N-1):
    -     self.SRECTF(alp, xr[i]*.001,yr[i]*.001,depth*.001,zero,length,\
    -                 zero, width,sd,cd,disl1,disl2,disl3)
    +     self.SRECTF(alp, xr[i]*.001, yr[i]*.001, depth*.001, zero, length,\
    +                 zero, width, sd, cd, disl1, disl2, disl3)

          z2[i] = self.U3

Around line 202:

          if J == 2: XI=X-AL2
          JK=J+K
    -     if JK <>3:
    +     if JK != 3:
              SIGN= F1
          else:

Around line 238, the Okada (1985) Fortran header inside the SRECTG
docstring is re-indented; its content is unchanged:

      def SRECTG(self,ALP,XI,ET,Q,SD,CD,DISL1,DISL2,DISL3):
          """
          C
          C*********************************************************************
          C*****                                                           *****
          C*****   INDEFINITE INTEGRAL OF SURFACE DISPLACEMENT,STRAIN,TILT *****
          C*****   DUE TO RECTANGULAR FAULT IN A HALF-SPACE                *****
          C*****   CODED BY Y.OKADA ... JAN 1985                           *****
          C*****                                                           *****
          C*********************************************************************
          C
          C***** INPUT
          C*****   ALP     : MEDIUM CONSTANT  MYU/(LAMDA+MYU)=1./((VP/VS)**2-1)
          C*****   XI,ET,Q : FAULT COORDINATE
          C*****   SD,CD   : SIN,COS OF DIP-ANGLE
          C*****       (CD=0.D0, SD=+/-1.D0 SHOULD BE GIVEN FOR VERTICAL FAULT)
          C*****   DISL1,DISL2,DISL3 : STRIKE-, DIP- AND TENSILE-DISLOCATION
          C
          C***** OUTPUT
          C*****   U1, U2, U3      : DISPLACEMENT ( UNIT= UNIT OF DISL )
          C*****   U11,U12,U21,U22 : STRAIN ( UNIT= UNIT OF DISL /
          C*****   U31,U32         : TILT          UNIT OF XI,ET,Q )
          C
          """

Around line 278:

      RRD=F1/(R*RD)

    - if Q <>F0:
    + if Q != F0:
          TT = atan( radians( XI*ET/(Q*R) ))
      else:
          TT = F0

    - if RET <>F0:
    + if RET != F0:
          RE = F1/RET
          DLE= log(RET)

Around line 319:

          A5=F0
      else:
    -     A5= ALP*F2/CD*atan( radians((ET*(X+Q*CD)+X*(R+X)*SD) / (XI*(R+X)*CD) ))
    +     A5= ALP*F2/CD*atan( radians((ET*(X+Q*CD)+X*(R+X)*SD) / \
    +                                 (XI*(R+X)*CD) ))
      A4= ALP/CD*( log(RD) - SD*DLE )
      A3= ALP*(Y/RD/CD - DLE) + TD*A4

Around lines 343-395, in the strike-slip, dip-slip and tensile-fault
sections:

      U32=F0

    - if DISL1 <>F0:
    + if DISL1 != F0:
          #C======================================
          #C=====  STRIKE-SLIP CONTRIBUTION  =====
      …
          U12=U12+ UN*( XI2*XI*( D/(ET2+Q2)/R3 - AET*SD ) - B2*SD )
          U21=U21+ UN*( XI*Q/R3*CD + (XI*Q2*AET - B2)*SD )
    -     U22=U22+ UN*( Y *Q/R3*CD + (Q*SD*(Q2*AET-F2*RRE) -(XI2+ET2)/R3*CD - B4)*SD )
    +     U22=U22+ UN*( Y *Q/R3*CD + (Q*SD*(Q2*AET-F2*RRE) - \
    +                   (XI2+ET2)/R3*CD - B4)*SD )
          U31=U31+ UN*(-XI*Q2*AET*CD + (XI*Q/R3 - C1)*SD )
          U32=U32+ UN*( D*Q/R3*CD + (XI2*Q*AET*CD - SD/R + Y*Q/R3 - C2)*SD )

    - if DISL2 <>F0:
    + if DISL2 != F0:
          #C===================================
          #C=====  DIP-SLIP CONTRIBUTION  =====
      …
          U32=U32+ UN*( Y*D*Q*AXI - (F2*D*RRX + XI*SD*RRE)*SD + C1*SDCD )

    - if DISL3 <>F0:
    + if DISL3 != F0:
          #C========================================
          #C=====  TENSILE-FAULT CONTRIBUTION  =====
      …
          U12=U12- UN*(-D*Q/R3 - XI2*Q*AET*SD + B1*SDSD )
          U21=U21- UN*( Q2*(CD/R3 + Q*AET*SD) + B1*SDSD )
    -     U22=U22- UN*((Y*CD-D*SD)*Q2*AXI - F2*Q*SD*CD*RRX - (XI*Q2*AET - B2)*SDSD )
    +     U22=U22- UN*((Y*CD-D*SD)*Q2*AXI - F2*Q*SD*CD*RRX - \
    +                  (XI*Q2*AET - B2)*SDSD )
          U31=U31- UN*( Q2*(SD/R3 - Q*AET*CD) + C3*SDSD )
    -     U32=U32- UN*((Y*SD+D*CD)*Q2*AXI + XI*Q2*AET*SD*CD - (F2*Q*RRX - C1)*SDSD )
    +     U32=U32- UN*((Y*SD+D*CD)*Q2*AXI + XI*Q2*AET*SD*CD - \
    +                  (F2*Q*RRX - C1)*SDSD )

Around lines 490-534:

      #======================================

    - if POT1 <>F0:
    + if POT1 != F0:
          UN=POT1/PI2
          QRX=QR*X
      …
      #======================================

    - if POT2 <>F0:
    + if POT2 != F0:
          UN=POT2/PI2
          SDCD=SD*CD
      …
      #========================================

    - if POT3 <>F0:
    + if POT3 != F0:
          UN=POT3/PI2
          SDSD=SD*SD
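Each "<>" becomes "!=". The two spellings are equivalent comparisons in
Python 2, and "<>" no longer exists in Python 3, so the edit is
behaviour-preserving:

    Q, F0 = 1.5, 0.0
    assert (Q != F0) is True    # same truth value as the old 'Q <> F0'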
trunk/anuga_core/source/anuga/shallow_water/test_data_manager.py (r7759 → r7765)

Around line 22, the csv helpers are imported from their new home:

      from anuga.abstract_2d_finite_volumes.util import file_function
      from anuga.utilities.system_tools import get_pathname_from_package
    - from anuga.utilities.file_utils import del_dir, load_csv_as_dict, \
    -      load_csv_as_array
    + from anuga.utilities.file_utils import del_dir
    + from anuga.file.csv_file import load_csv_as_dict, load_csv_as_array
      from anuga.anuga_exceptions import ANUGAError
      from anuga.utilities.numerical_tools import ensure_numeric, mean

Around line 1992, two tests are deleted
(test_urs2sts_nonstandard_projection_reverse and test_urs2stsII):

          assert num.allclose([x[i],y[i]], [e,n])
          assert zone==-1
    -
      self.delete_mux(files)
    -
    -
    - def test_urs2sts_nonstandard_projection_reverse(self):
    -     """
    -     Test that a point not in the specified zone can occur first
    -     """
    -     tide=0
    -     time_step_count = 3
    -     time_step = 2
    -     lat_long_points =[(-21.,113.5),(-21.,114.5),(-21.,114.), (-21.,115.)]
    -     n=len(lat_long_points)
    -     first_tstep=num.ones(n,num.int)
    -     first_tstep[0]+=1
    -     first_tstep[2]+=1
    -     last_tstep=(time_step_count)*num.ones(n,num.int)
    -     last_tstep[0]-=1
    -
    -     gauge_depth=20*num.ones(n,num.float)
    -     ha=2*num.ones((n,time_step_count),num.float)
    -     ha[0]=num.arange(0,time_step_count)
    -     ha[1]=num.arange(time_step_count,2*time_step_count)
    -     ha[2]=num.arange(2*time_step_count,3*time_step_count)
    -     ha[3]=num.arange(3*time_step_count,4*time_step_count)
    -     ua=5*num.ones((n,time_step_count),num.float)
    -     va=-10*num.ones((n,time_step_count),num.float)
    -
    -     base_name, files = self.write_mux2(lat_long_points,
    -                                        time_step_count, time_step,
    -                                        first_tstep, last_tstep,
    -                                        depth=gauge_depth,
    -                                        ha=ha,
    -                                        ua=ua,
    -                                        va=va)
    -
    -     urs2sts(base_name,
    -             basename_out=base_name,
    -             zone=50,
    -             mean_stage=tide,verbose=False)
    -
    -     # now I want to check the sts file ...
    -     sts_file = base_name + '.sts'
    -
    -     #Let's interigate the sww file
    -     # Note, the sww info is not gridded.  It is point data.
    -     fid = NetCDFFile(sts_file)
    -
    -     # Make x and y absolute
    -     x = fid.variables['x'][:]
    -     y = fid.variables['y'][:]
    -
    -     geo_reference = Geo_reference(NetCDFObject=fid)
    -     points = geo_reference.get_absolute(map(None, x, y))
    -     points = ensure_numeric(points)
    -
    -     x = points[:,0]
    -     y = points[:,1]
    -
    -     # Check that all coordinate are correctly represented
    -     # Using the non standard projection (50)
    -     for i in range(4):
    -         zone, e, n = redfearn(lat_long_points[i][0], lat_long_points[i][1],
    -                               zone=50)
    -         assert num.allclose([x[i],y[i]], [e,n])
    -         assert zone==geo_reference.zone
    -
    -     self.delete_mux(files)
    -
    -
    - def test_urs2stsII(self):
    -     """
    -     Test multiple sources
    -     """
    -     tide=0
    -     time_step_count = 3
    -     time_step = 2
    -     lat_long_points =[(-21.5,114.5),(-21,114.5),(-21.5,115), (-21.,115.)]
    -     n=len(lat_long_points)
    -     first_tstep=num.ones(n,num.int)
    -     first_tstep[0]+=1
    -     first_tstep[2]+=1
    -     last_tstep=(time_step_count)*num.ones(n,num.int)
    -     last_tstep[0]-=1
    -
    -     gauge_depth=20*num.ones(n,num.float)
    -     ha=2*num.ones((n,time_step_count),num.float)
    -     ha[0]=num.arange(0,time_step_count)
    -     ha[1]=num.arange(time_step_count,2*time_step_count)
    -     ha[2]=num.arange(2*time_step_count,3*time_step_count)
    -     ha[3]=num.arange(3*time_step_count,4*time_step_count)
    -     ua=5*num.ones((n,time_step_count),num.float)
    -     va=-10*num.ones((n,time_step_count),num.float)
    -
    -     # Create two identical mux files to be combined by urs2sts
    -     base_nameI, filesI = self.write_mux2(lat_long_points,
    -                                          time_step_count, time_step,
    -                                          first_tstep, last_tstep,
    -                                          depth=gauge_depth,
    -                                          ha=ha,
    -                                          ua=ua,
    -                                          va=va)
    -
    -     base_nameII, filesII = self.write_mux2(lat_long_points,
    -                                            time_step_count, time_step,
    -                                            first_tstep, last_tstep,
    -                                            depth=gauge_depth,
    -                                            ha=ha,
    -                                            ua=ua,
    -                                            va=va)
    -
    -     # Call urs2sts with multiple mux files
    -     urs2sts([base_nameI, base_nameII],
    -             basename_out=base_nameI,
    -             weights=[1.0, 1.0],
    -             mean_stage=tide,
    -             verbose=False)
    -
    -     # now I want to check the sts file ...
    -     sts_file = base_nameI + '.sts'
    -
    -     #Let's interrogate the sts file
    -     # Note, the sts info is not gridded.  It is point data.
    -     fid = NetCDFFile(sts_file)
    -
    -     # Make x and y absolute
    -     x = fid.variables['x'][:]
    -     y = fid.variables['y'][:]
    -
    -     geo_reference = Geo_reference(NetCDFObject=fid)
    -     points = geo_reference.get_absolute(map(None, x, y))
    -     points = ensure_numeric(points)
    -
    -     x = points[:,0]
    -     y = points[:,1]
    -
    -     #Check that first coordinate is correctly represented
    -     #Work out the UTM coordinates for first point
    -     zone, e, n = redfearn(lat_long_points[0][0], lat_long_points[0][1])
    -     assert num.allclose([x[0],y[0]], [e,n])
    -
    -     #Check the time vector
    -     times = fid.variables['time'][:]
    -
    -     times_actual = []
    -     for i in range(time_step_count):
    -         times_actual.append(time_step * i)
    -
    -     assert num.allclose(ensure_numeric(times),
    -                         ensure_numeric(times_actual))
    -
    -     #Check first value
    -     stage = fid.variables['stage'][:]
    -     xmomentum = fid.variables['xmomentum'][:]
    -     ymomentum = fid.variables['ymomentum'][:]
    -     elevation = fid.variables['elevation'][:]
    -
    -     # Set original data used to write mux file to be zero when gauges are
    -     # not recdoring
    -
    -     ha[0][0]=0.0
    -     ha[0][time_step_count-1]=0.0
    -     ha[2][0]=0.0
    -     ua[0][0]=0.0
    -     ua[0][time_step_count-1]=0.0
    -     ua[2][0]=0.0
    -     va[0][0]=0.0
    -     va[0][time_step_count-1]=0.0
    -     va[2][0]=0.0;
    -
    -     # The stage stored in the .sts file should be the sum of the stage
    -     # in the two mux2 files because both have weights = 1. In this case
    -     # the mux2 files are the same so stage == 2.0 * ha
    -     #print 2.0*num.transpose(ha) - stage
    -     assert num.allclose(2.0*num.transpose(ha), stage)  #Meters
    -
    -     #Check the momentums - ua
    -     #momentum = velocity*(stage-elevation)
    -     # elevation = - depth
    -     #momentum = velocity_ua *(stage+depth)
    -
    -     depth=num.zeros((len(lat_long_points),time_step_count),num.float)
    -     for i in range(len(lat_long_points)):
    -         depth[i]=gauge_depth[i]+tide+2.0*ha[i]
    -         #2.0*ha necessary because using two files with weights=1 are used
    -
    -     # The xmomentum stored in the .sts file should be the sum of the ua
    -     # in the two mux2 files multiplied by the depth.
    -     assert num.allclose(2.0*num.transpose(ua*depth), xmomentum)
    -
    -     #Check the momentums - va
    -     #momentum = velocity*(stage-elevation)
    -     # elevation = - depth
    -     #momentum = velocity_va *(stage+depth)
    -
    -     # The ymomentum stored in the .sts file should be the sum of the va
    -     # in the two mux2 files multiplied by the depth.
    -     assert num.allclose(2.0*num.transpose(va*depth), ymomentum)
    -
    -     # check the elevation values.
    -     # -ve since urs measures depth, sww meshers height,
    -     assert num.allclose(-elevation, gauge_depth)  #Meters
    -
    -     fid.close()
    -     self.delete_mux(filesI)
    -     self.delete_mux(filesII)
    -     os.remove(sts_file)
    +
    +

      def test_urs2sts_individual_sources(self):
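The momentum assertions in the deleted test rest on the identity
momentum = velocity * (stage - elevation) with elevation = -depth, and on
stage doubling when two identical unit-weight sources are summed. A
standalone numpy check of that arithmetic (values mirror the test setup;
this is a sketch, not ANUGA code):

    import numpy as num

    gauge_depth = 20.0            # still-water depth at the gauge
    tide = 0.0
    ha, ua = 2.0, 5.0             # wave height and east velocity, one source

    stage = 2.0 * ha              # two sources, weights = [1.0, 1.0]
    depth = gauge_depth + tide + stage
    xmomentum = 2.0 * ua * depth  # summed velocity times total depth
    assert num.allclose(xmomentum, 240.0)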
trunk/anuga_core/source/anuga/utilities/file_utils.py (r7756 → r7765)

Around line 310, load_csv_as_dict and load_csv_as_array move out to the new
anuga.file.csv_file module:

      return iterate_over

    -
    - ##
    - # @brief Read a CSV file and convert to a dictionary of {key: column}.
    - # @param file_name The path to the file to read.
    - # @param title_check_list List of titles that *must* be columns in the file.
    - # @return Two dicts: ({key:column}, {title:index}).
    - # @note WARNING: Values are returned as strings.
    - def load_csv_as_dict(file_name, title_check_list=None):
    -     """
    -     Load in the csv as a dictionary, title as key and column info as value.
    -     Also, create a dictionary, title as key and column index as value,
    -     to keep track of the column order.
    -
    -     Two dictionaries are returned.
    -
    -     WARNING: Values are returned as strings.
    -              Do this to change a list of strings to a list of floats
    -                  time = [float(x) for x in time]
    -     """
    -
    -     # FIXME(Ole): Consider dealing with files without headers
    -     # FIXME(Ole): Consider a wrapper automatically converting text fields
    -     #             to the right type by trying for: int, float, string
    -
    -     attribute_dic = {}
    -     title_index_dic = {}
    -     titles_stripped = []    # List of titles
    -
    -     reader = csv.reader(file(file_name))
    -
    -     # Read in and manipulate the title info
    -     titles = reader.next()
    -     for i, title in enumerate(titles):
    -         header = title.strip()
    -         titles_stripped.append(header)
    -         title_index_dic[header] = i
    -     title_count = len(titles_stripped)
    -
    -     # Check required columns
    -     if title_check_list is not None:
    -         for title_check in title_check_list:
    -             if not title_index_dic.has_key(title_check):
    -                 msg = 'Reading error. This row is not present %s' % title_check
    -                 raise IOError, msg
    -
    -     # Create a dictionary of column values, indexed by column title
    -     for line in reader:
    -         n = len(line)    # Number of entries
    -         if n != title_count:
    -             msg = 'Entry in file %s had %d columns ' % (file_name, n)
    -             msg += 'although there were %d headers' % title_count
    -             raise IOError, msg
    -         for i, value in enumerate(line):
    -             attribute_dic.setdefault(titles_stripped[i], []).append(value)
    -
    -     return attribute_dic, title_index_dic
    -
    -
    - ##
    - # @brief Convert CSV file to a dictionary of arrays.
    - # @param file_name The path to the file to read.
    - def load_csv_as_array(file_name):
    -     """
    -     Convert CSV files of the form:
    -
    -     time, discharge, velocity
    -     0.0,  1.2,       0.0
    -     0.1,  3.2,       1.1
    -     ...
    -
    -     to a dictionary of numeric arrays.
    -
    -     See underlying function load_csv_as_dict for more details.
    -     """
    -
    -     X, _ = load_csv_as_dict(file_name)
    -
    -     Y = {}
    -     for key in X.keys():
    -         Y[key] = num.array([float(x) for x in X[key]])
    -
    -     return Y
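Both helpers are now imported from anuga.file.csv_file, as the edits to
util.py, exposure.py, data_manager.py and the tests show. A short usage
sketch following the docstrings above (the filename is a placeholder):

    from anuga.file.csv_file import load_csv_as_dict, load_csv_as_array

    # Values come back as strings keyed by the stripped column titles
    data, title_index = load_csv_as_dict('gauges.csv')
    time = [float(x) for x in data['time']]     # per the docstring's advice

    # Or convert every column to a numeric array in one step
    arrays = load_csv_as_array('gauges.csv')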
trunk/anuga_core/source/anuga/utilities/test_file_utils.py (r7756 → r7765)

Around line 4, the import is made absolute:

      import shutil

    - from file_utils import copy_code_files
    + from anuga.utilities.file_utils import copy_code_files

Around line 59, the test suite is actually run:

      #suite = unittest.makeSuite(Test_Data_Manager, 'test_sww2domain2')
      suite = unittest.makeSuite(Test_FileUtils, 'test_sww')
      runner = unittest.TextTestRunner() #verbosity=2)
    + runner.run(suite)