Changeset 6304 for branches/numpy/anuga/shallow_water/data_manager.py
- Timestamp: Feb 10, 2009, 11:11:04 AM (16 years ago)
- Location: branches/numpy
- Files: 1 edited, 1 copied
Legend (for the diff below):
- Unmodified lines carry no prefix
- Added lines are prefixed with '+'
- Removed lines are prefixed with '-'
branches/numpy/anuga/shallow_water/data_manager.py
--- branches/numpy/anuga/shallow_water/data_manager.py (r6224)
+++ branches/numpy/anuga/shallow_water/data_manager.py (r6304)

@@ -61 +61 @@
 from os import sep, path, remove, mkdir, access, F_OK, W_OK, getcwd

-import Numeric as num
+import numpy as num

 from Scientific.IO.NetCDF import NetCDFFile

@@ -76 +76 @@
 default_minimum_storable_height
 from anuga.config import netcdf_mode_r, netcdf_mode_w, netcdf_mode_a
+from anuga.config import netcdf_float, netcdf_float32, netcdf_int
 from anuga.config import max_float
 from anuga.utilities.numerical_tools import ensure_numeric, mean

@@ -343 +344 @@
 from Scientific.IO.NetCDF import NetCDFFile

-self.precision = num.Float32 #Use single precision for quantities
+self.precision = netcdf_float32 #Use single precision for quantities
 self.recursion = recursion
 self.mode = mode

@@ -438 +439 @@

 # store the connectivity data
-points = num.concatenate( (X[:,num.NewAxis],Y[:,num.NewAxis]), axis=1 )
+points = num.concatenate( (X[:,num.newaxis],Y[:,num.newaxis]), axis=1 )
 self.writer.store_triangulation(fid,
 points,
-# V.astype(volumes.typecode()),
-V.astype(num.Float32),
+V.astype(num.float32),
 Z,
 points_georeference=\

@@ -561 +561 @@
 # Define a zero vector of same size and type as A
 # for use with momenta
-null = num.zeros(num.size(A), A.typecode())
+null = num.zeros(num.size(A), A.dtype.char) #??#

 # Get xmomentum where depth exceeds minimum_storable_height

@@ -622 +622 @@
 from Scientific.IO.NetCDF import NetCDFFile

-self.precision = num.Float #Use full precision
+self.precision = netcdf_float #Use full precision

 Data_format.__init__(self, domain, 'sww', mode)

@@ -650 +650 @@


-fid.createVariable('volumes', num.Int, ('number_of_volumes',
-'number_of_vertices'))
+fid.createVariable('volumes', netcdf_int, ('number_of_volumes',
+'number_of_vertices'))

 fid.createVariable('time', self.precision, ('number_of_timesteps',))

@@ -1304 +1304 @@

 M = size #Number of lines
-xx = num.zeros((M,3), num.Float)
-yy = num.zeros((M,3), num.Float)
-zz = num.zeros((M,3), num.Float)
+xx = num.zeros((M,3), num.float)
+yy = num.zeros((M,3), num.float)
+zz = num.zeros((M,3), num.float)

 for i in range(M):

@@ -1349 +1349 @@

 M = len(lines) #Number of lines
-x = num.zeros((M,3), num.Float)
-y = num.zeros((M,3), num.Float)
-z = num.zeros((M,3), num.Float)
+x = num.zeros((M,3), num.float)
+y = num.zeros((M,3), num.float)
+z = num.zeros((M,3), num.float)

 for i, line in enumerate(lines):

@@ -1409 +1409 @@
 # @param step Timestep stride.
 def filter_netcdf(filename1, filename2, first=0, last=None, step=1):
-"""Read netcdf filename1, pick timesteps first:step:last and save to
+"""Filter data file, selecting timesteps first:step:last.
+
+Read netcdf filename1, pick timesteps first:step:last and save to
 nettcdf file filename2
 """

@@ -1426 +1428 @@
 for name in infile.variables:
 var = infile.variables[name]
-outfile.createVariable(name, var.typecode(), var.dimensions)
+outfile.createVariable(name, var.dtype.char, var.dimensions) #??#

 # Copy the static variables

@@ -1499 +1501 @@
 Convert to NetCDF pts format which is

-points: (Nx2) Float array
-elevation: N Float array
+points: (Nx2) float array
+elevation: N float array


@@ -1658 +1660 @@

 # Variable definitions
-outfile.createVariable('points', num.Float, ('number_of_points',
-'number_of_dimensions'))
-outfile.createVariable('elevation', num.Float, ('number_of_points',))
+outfile.createVariable('points', netcdf_float, ('number_of_points',
+'number_of_dimensions'))
+outfile.createVariable('elevation', netcdf_float, ('number_of_points',))

 # Get handles to the variables

@@ -1684 +1686 @@
 newcols = lenv # ncols_in_bounding_box

-telev = num.zeros(newcols, num.Float)
-tpoints = num.zeros((newcols, 2), num.Float)
+telev = num.zeros(newcols, num.float)
+tpoints = num.zeros((newcols, 2), num.float)

 local_index = 0

@@ -1801 +1803 @@
 Convert to NetCDF pts format which is

-points: (Nx2) Float array
-elevation: N Float array
+points: (Nx2) float array
+elevation: N float array


@@ -2245 +2247 @@
 # Comment out for reduced memory consumption
 for name in ['stage', 'xmomentum', 'ymomentum']:
-q = fid.variables[name][:].flat
+q = fid.variables[name][:].flatten()
 if timestep is not None:
 q = q[timestep*len(x):(timestep+1)*len(x)]
 if verbose: print ' %s in [%f, %f]' %(name, min(q), max(q))
 for name in ['elevation']:
-q = fid.variables[name][:].flat
+q = fid.variables[name][:].flatten()
 if verbose: print ' %s in [%f, %f]' %(name, min(q), max(q))

@@ -2256 +2258 @@
 if verbose: print 'Processing quantity %s' %quantity

-# Turn NetCDF objects into Numeric arrays
+# Turn NetCDF objects into numeric arrays
 try:
 q = fid.variables[quantity][:]

@@ -2269 +2271 @@
 #q has a time component, must be reduced along the temporal dimension
 if verbose: print 'Reducing quantity %s' %quantity
-q_reduced = num.zeros(number_of_points, num.Float)
+q_reduced = num.zeros(number_of_points, num.float)

 if timestep is not None:

@@ -2329 +2331 @@
 y = y + yllcorner - newyllcorner

-vertex_points = num.concatenate ((x[:,num.NewAxis], y[:,num.NewAxis]), axis=1)
+vertex_points = num.concatenate ((x[:,num.newaxis], y[:,num.newaxis]), axis=1)
 assert len(vertex_points.shape) == 2

-grid_points = num.zeros ((ncols*nrows, 2), num.Float)
+grid_points = num.zeros ((ncols*nrows, 2), num.float)

 for i in xrange(nrows):
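The hunks above are largely mechanical Numeric-to-numpy substitutions: the old named type objects (num.Float, num.Float32, num.Int) become lowercase numpy dtype names or the new netcdf_* constants from anuga.config, and Numeric's a.typecode() becomes a.dtype.char. A minimal sketch of the dtype side of that pattern, using plain numpy only (the variable names are illustrative, and modern numpy spells the old num.float alias simply as float):

    import numpy as num

    # Numeric's named types map onto numpy dtype names:
    #   num.Float -> num.float (Python float, i.e. float64), num.Int -> num.int,
    #   num.Float32 -> num.float32, num.Int32 -> num.int32
    xx = num.zeros((4, 3), float)            # was: num.zeros((4, 3), num.Float)

    # Numeric's a.typecode() becomes a.dtype.char in numpy
    null = num.zeros(num.size(xx), xx.dtype.char)

    # the keyword argument is renamed as well: typecode=... -> dtype=...
    data = num.array([1.0, 2.0, 3.0], dtype=float)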
@@ -2359 +2361 @@
 #Interpolate using quantity values
 if verbose: print 'Interpolating'
-grid_values = interp.interpolate(q, grid_points).flat
+grid_values = interp.interpolate(q, grid_points).flatten()

 if verbose:
-print 'Interpolated values are in [%f, %f]' %(min(grid_values),
-max(grid_values))
+print 'Interpolated values are in [%f, %f]' %(min(grid_values.flat),
+max(grid_values.flat))

 #Assign NODATA_value to all points outside bounding polygon (from interpolation mesh)

@@ -2631 +2633 @@
 if verbose: print 'Processing quantity %s' % quantity

-# Turn NetCDF objects into Numeric arrays
+# Turn NetCDF objects into numeric arrays
 quantity_dict = {}
 for name in fid.variables.keys():

@@ -2644 +2646 @@
 if verbose: print 'Reducing quantity %s' % quantity

-q_reduced = num.zeros(number_of_points, num.Float)
+q_reduced = num.zeros(number_of_points, num.float)
 for k in range(number_of_points):
 q_reduced[k] = reduction(q[:,k])

@@ -2658 +2660 @@

 # Create grid and update xll/yll corner and x,y
-vertex_points = num.concatenate((x[:, num.NewAxis], y[:, num.NewAxis]), axis=1)
+vertex_points = num.concatenate((x[:, num.newaxis], y[:, num.newaxis]), axis=1)
 assert len(vertex_points.shape) == 2

@@ -2667 +2669 @@
 # Interpolate using quantity values
 if verbose: print 'Interpolating'
-interpolated_values = interp.interpolate(q, data_points).flat
+interpolated_values = interp.interpolate(q, data_points).flatten

 if verbose:
-print 'Interpolated values are in [%f, %f]' % (min(interpolated_values),
-max(interpolated_values))
+print 'Interpolated values are in [%f, %f]' \
+% (min(interpolated_values.flat), max(interpolated_values.flat))

 # Assign NODATA_value to all points outside bounding polygon

@@ -2878 +2880 @@

 # variable definitions
-fid.createVariable('elevation', num.Float, ('number_of_rows',
-'number_of_columns'))
+fid.createVariable('elevation', netcdf_float, ('number_of_rows',
+'number_of_columns'))

 # Get handles to the variables

@@ -2959 +2961 @@
 from Scientific.IO.NetCDF import NetCDFFile

-precision = num.Float
+precision = num.float

 msg = 'Must use latitudes and longitudes for minlat, maxlon etc'

@@ -3078 +3080 @@
 # elevations = file_e.variables['ELEVATION'][kmin:kmax, lmin:lmax]
 # elif latitudes2[0]==latitudes[-1] and latitudes2[-1]==latitudes[0]:
-# from Numeric import asarray
+# from numpy import asarray
 # elevations=elevations.tolist()
 # elevations.reverse()
 # elevations=asarray(elevations)
 # else:
-# from Numeric import asarray
+# from numpy import asarray
 # elevations=elevations.tolist()
 # elevations.reverse()

@@ -3195 +3197 @@
 sww.store_header(outfile, times, number_of_volumes,
 number_of_points, description=description,
-verbose=verbose, sww_precision=num.Float)
+verbose=verbose, sww_precision=netcdf_float)

 # Store
 from anuga.coordinate_transforms.redfearn import redfearn
-x = num.zeros(number_of_points, num.Float) #Easting
-y = num.zeros(number_of_points, num.Float) #Northing
+x = num.zeros(number_of_points, num.float) #Easting
+y = num.zeros(number_of_points, num.float) #Northing

 if verbose: print 'Making triangular grid'

@@ -3234 +3236 @@
 volumes.append([v4,v3,v2]) #Lower element

-volumes = num.array(volumes, num.Int) #array default#
+volumes = num.array(volumes)

 if origin is None:

@@ -3255 +3257 @@
 outfile.variables['z'][:] = z #FIXME HACK for bacwards compat.
 outfile.variables['elevation'][:] = z
-outfile.variables['volumes'][:] = volumes.astype(num.Int32) #For Opteron 64
+outfile.variables['volumes'][:] = volumes.astype(num.int32) #For Opteron 64

 #Time stepping

@@ -3387 +3389 @@
 d = len(q)

-T = num.zeros(N, num.Float) # Time
-Q = num.zeros((N, d), num.Float) # Values
+T = num.zeros(N, num.float) # Time
+Q = num.zeros((N, d), num.float) # Values

 for i, line in enumerate(lines):

@@ -3424 +3426 @@
 fid.createDimension('number_of_timesteps', len(T))

-fid.createVariable('time', num.Float, ('number_of_timesteps',))
+fid.createVariable('time', netcdf_float, ('number_of_timesteps',))

 fid.variables['time'][:] = T

@@ -3434 +3436 @@
 name = 'Attribute%d' % i

-fid.createVariable(name, num.Float, ('number_of_timesteps',))
+fid.createVariable(name, netcdf_float, ('number_of_timesteps',))
 fid.variables[name][:] = Q[:,i]

@@ -3505 +3507 @@
 time_interp = get_time_interp(time,t)

-# Get the variables as Numeric arrays
+# Get the variables as numeric arrays
 x = fid.variables['x'][:] # x-coordinates of vertices
 y = fid.variables['y'][:] # y-coordinates of vertices

@@ -3518 +3520 @@
 # FIXME (Ole): Something like this might be better:
 # concatenate((x, y), axis=1)
-# or concatenate((x[:,num.NewAxis], x[:,num.NewAxis]), axis=1)
+# or concatenate((x[:,num.newaxis], x[:,num.newaxis]), axis=1)

 conserved_quantities = []

@@ -3706 +3708 @@
 # @param boundary
 def weed(coordinates, volumes, boundary=None):
-if type(coordinates) == num.ArrayType:
+if isinstance(coordinates, num.ndarray):
 coordinates = coordinates.tolist()
-if type(volumes) == num.ArrayType:
+if isinstance(volumes, num.ndarray):
 volumes = volumes.tolist()

@@ -3854 +3856 @@

 # variable definition
-outfile.createVariable('elevation', num.Float, ('number_of_points',))
+outfile.createVariable('elevation', netcdf_float, ('number_of_points',))

 # Get handle to the variable

@@ -3867 +3869 @@

 lower_index = global_index
-telev = num.zeros(ncols_new, num.Float)
+telev = num.zeros(ncols_new, num.float)
 local_index = 0
 trow = i * cellsize_ratio
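Two further renamings recur in the hunks above: the index constant num.NewAxis becomes num.newaxis, and .flat is replaced by .flatten() where an actual array (rather than numpy's flat iterator) is needed. A small illustrative sketch, not taken from the changeset itself:

    import numpy as num

    x = num.array([0.0, 1.0, 2.0])
    y = num.array([3.0, 4.0, 5.0])

    # Numeric: num.NewAxis        numpy: num.newaxis
    points = num.concatenate((x[:, num.newaxis], y[:, num.newaxis]), axis=1)

    # numpy's .flat is an iterator, so call .flatten() for a 1-D array copy;
    # iterating .flat is still fine for things like min()/max().
    grid_values = points.flatten()
    lo, hi = min(points.flat), max(points.flat)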
@@ -3995 +3997 @@
 from anuga.coordinate_transforms.redfearn import redfearn

-precision = num.Float # So if we want to change the precision its done here
+precision = netcdf_float # So if we want to change the precision its done here

 # go in to the bath dir and load the only file,

@@ -4096 +4098 @@
 #################################

-outfile.createVariable('volumes', num.Int, ('number_of_volumes',
-'number_of_vertices'))
+outfile.createVariable('volumes', netcdf_int, ('number_of_volumes',
+'number_of_vertices'))

 outfile.createVariable('time', precision, ('number_of_timesteps',))

@@ -4113 +4115 @@
 from anuga.coordinate_transforms.redfearn import redfearn

-x = num.zeros(number_of_points, num.Float) #Easting
-y = num.zeros(number_of_points, num.Float) #Northing
+x = num.zeros(number_of_points, num.float) #Easting
+y = num.zeros(number_of_points, num.float) #Northing

 if verbose: print 'Making triangular grid'

@@ -4150 +4152 @@
 volumes.append([v4,v2,v3]) #Lower element

-volumes = num.array(volumes, num.Int) #array default#
+volumes = num.array(volumes)

 geo_ref = Geo_reference(refzone, min(x), min(y))

@@ -4175 +4177 @@
 outfile.variables['z'][:] = z
 outfile.variables['elevation'][:] = z
-outfile.variables['volumes'][:] = volumes.astype(num.Int32) # On Opteron 64
+outfile.variables['volumes'][:] = volumes.astype(num.int32) # On Opteron 64

 stage = outfile.variables['stage']

@@ -4367 +4369 @@
 lat_name = 'LAT'
 time_name = 'TIME'
-precision = num.Float # So if we want to change the precision its done here
+precision = netcdf_float # So if we want to change the precision its done here

 ##

@@ -4648 +4650 @@
 lonlatdep = p_array.array('f')
 lonlatdep.read(mux_file, columns * points_num)
-lonlatdep = num.array(lonlatdep, typecode=num.Float)
+lonlatdep = num.array(lonlatdep, dtype=num.float)
 lonlatdep = num.reshape(lonlatdep, (points_num, columns))

@@ -4655 +4657 @@
 lon_sorted.sort()

-if not lon == lon_sorted:
+if not num.alltrue(lon == lon_sorted):
 msg = "Longitudes in mux file are not in ascending order"
 raise IOError, msg

@@ -4661 +4663 @@
 lat_sorted = list(lat)
 lat_sorted.sort()
-
-# UNUSED?
-## if not lat == lat_sorted:
-## msg = "Latitudes in mux file are not in ascending order"

 nc_file = Write_nc(quantity,

@@ -4677 +4675 @@
 hz_p_array = p_array.array('f')
 hz_p_array.read(mux_file, points_num)
-hz_p = num.array(hz_p_array, typecode=num.Float)
+hz_p = num.array(hz_p_array, dtype=num.float)
 hz_p = num.reshape(hz_p, (len(lon), len(lat)))
 hz_p = num.transpose(hz_p) # mux has lat varying fastest, nc has long v.f.

@@ -4775 +4773 @@
 QUANTITY = 2

-long_lat_dep = ensure_numeric(long_lat_dep, num.Float)
+long_lat_dep = ensure_numeric(long_lat_dep, num.float)

 num_points = long_lat_dep.shape[0]

@@ -4813 +4811 @@
 # FIXME - make this faster/do this a better way
 # use numeric transpose, after reshaping the quantity vector
-quantity = num.zeros(num_points, num.Float)
+quantity = num.zeros(num_points, num.float)

 for lat_i, _ in enumerate(lat):

@@ -5266 +5264 @@

 points_utm=ensure_numeric(points_utm)
-assert ensure_numeric(mesh_dic['generatedpointlist']) \
-== ensure_numeric(points_utm)
+assert num.alltrue(ensure_numeric(mesh_dic['generatedpointlist'])
+== ensure_numeric(points_utm))

 volumes = mesh_dic['generatedtrianglelist']

@@ -5285 +5283 @@
 sww = Write_sww()
 sww.store_header(outfile, times, len(volumes), len(points_utm),
-verbose=verbose, sww_precision=num.Float)
+verbose=verbose, sww_precision=netcdf_float)
 outfile.mean_stage = mean_stage
 outfile.zscale = zscale

@@ -5309 +5307 @@
 xmomentum=xmomentum,
 ymomentum=ymomentum,
-sww_precision=num.Float)
+sww_precision=num.float)
 j += 1

@@ -5356 +5354 @@
 numSrc = len(filenames)

-file_params = -1 * num.ones(3, num.Float) # [nsta,dt,nt]
+file_params = -1 * num.ones(3, num.float) # [nsta,dt,nt]

 # Convert verbose to int C flag

@@ -5365 +5363 @@

 if weights is None:
-weights = num.ones(numSrc)
+weights = num.ones(numSrc, num.int) #array default#

 if permutation is None:
-permutation = ensure_numeric([], num.Float)
+permutation = ensure_numeric([], num.float)

 # Call underlying C implementation urs2sts_ext.c

@@ -5416 +5414 @@

 times = dt * num.arange(parameters_index)
-latitudes = num.zeros(number_of_selected_stations, num.Float)
-longitudes = num.zeros(number_of_selected_stations, num.Float)
-elevation = num.zeros(number_of_selected_stations, num.Float)
-quantity = num.zeros((number_of_selected_stations, parameters_index), num.Float)
+latitudes = num.zeros(number_of_selected_stations, num.float)
+longitudes = num.zeros(number_of_selected_stations, num.float)
+elevation = num.zeros(number_of_selected_stations, num.float)
+quantity = num.zeros((number_of_selected_stations, parameters_index), num.float)

 starttime = 1e16

@@ -5543 +5541 @@
 if weights is None:
 # Default is equal weighting
-weights = num.ones(numSrc, num.Float) / numSrc
+weights = num.ones(numSrc, num.float) / numSrc
 else:
 weights = ensure_numeric(weights)

@@ -5665 +5663 @@
 # 0 to number_of_points-1
 if permutation is None:
-permutation = num.arange(number_of_points, typecode=num.Int)
+permutation = num.arange(number_of_points, dtype=num.int)

 # NetCDF file definition

@@ -5679 +5677 @@
 description=description,
 verbose=verbose,
-sts_precision=num.Float)
+sts_precision=netcdf_float)

 # Store
 from anuga.coordinate_transforms.redfearn import redfearn

-x = num.zeros(number_of_points, num.Float) # Easting
-y = num.zeros(number_of_points, num.Float) # Northing
+x = num.zeros(number_of_points, num.float) # Easting
+y = num.zeros(number_of_points, num.float) # Northing

 # Check zone boundaries

@@ -5716 +5714 @@

 elevation = num.resize(elevation, outfile.variables['elevation'][:].shape)
-outfile.variables['permutation'][:] = permutation.astype(num.Int32) # Opteron 64
+outfile.variables['permutation'][:] = permutation.astype(num.int32) # Opteron 64
 outfile.variables['x'][:] = x - geo_ref.get_xllcorner()
 outfile.variables['y'][:] = y - geo_ref.get_yllcorner()
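Several hunks above also wrap array comparisons in num.alltrue(...) before using them in if or assert, because a numpy comparison such as lon == lon_sorted is element-wise and a multi-element boolean array cannot be used directly as a truth value. A sketch of the idea (num.all is the modern spelling of num.alltrue; the data here is made up):

    import numpy as num

    lon = num.array([150.0, 150.5, 151.0])
    lon_sorted = num.sort(lon)

    # The element-wise result must be reduced before truth-testing:
    if not num.all(lon == lon_sorted):
        raise IOError("Longitudes in mux file are not in ascending order")

    assert num.all(lon == lon_sorted)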
@@ -5821 +5819 @@
 # @param smoothing True if smoothing is to be used.
 # @param order
-# @param sww_precision Data type of the quantitiy to be written (Float32)
+# @param sww_precision Data type of the quantitiy written (netcdf constant)
 # @param verbose True if this function is to be verbose.
 # @note If 'times' is a list, the info will be made relative.

@@ -5832 +5830 @@
 smoothing=True,
 order=1,
-sww_precision=num.Float32,
+sww_precision=netcdf_float32,
 verbose=False):
 """Write an SWW file header.

@@ -5865 +5863 @@
 # This is being used to seperate one number from a list.
 # what it is actually doing is sorting lists from numeric arrays.
-if type(times) is list or type(times) is num.ArrayType:
+if type(times) is list or isinstance(times, num.ndarray):
 number_of_times = len(times)
 times = ensure_numeric(times)

@@ -5914 +5912 @@
 outfile.createVariable('z', sww_precision, ('number_of_points',))

-outfile.createVariable('volumes', num.Int, ('number_of_volumes',
-'number_of_vertices'))
+outfile.createVariable('volumes', netcdf_int, ('number_of_volumes',
+'number_of_vertices'))

 # Doing sww_precision instead of Float gives cast errors.
-outfile.createVariable('time', num.Float,
+outfile.createVariable('time', netcdf_float,
 ('number_of_timesteps',))

@@ -5932 +5930 @@
 #outfile.variables[q+Write_sww.RANGE][1] = -max_float # Max

-if type(times) is list or type(times) is num.ArrayType:
+if type(times) is list or isinstance(times, num.ndarray):
 outfile.variables['time'][:] = times #Store time relative

@@ -6035 +6033 @@
 outfile.variables['z'][:] = elevation
 outfile.variables['elevation'][:] = elevation #FIXME HACK
-outfile.variables['volumes'][:] = volumes.astype(num.Int32) #On Opteron 64
+outfile.variables['volumes'][:] = volumes.astype(num.int32) #On Opteron 64

 q = 'elevation'

@@ -6051 +6049 @@
 # @param verbose True if this function is to be verbose.
 # @param **quant
-def store_quantities(self, outfile, sww_precision=num.Float32,
+def store_quantities(self, outfile, sww_precision=num.float32,
 slice_index=None, time=None,
 verbose=False, **quant):

@@ -6236 +6234 @@
 # @param number_of_points The number of URS gauge sites.
 # @param description Description string to write into the STS file.
-# @param sts_precision Format of data to write (default Float32).
+# @param sts_precision Format of data to write (netcdf constant ONLY).
 # @param verbose True if this function is to be verbose.
 # @note If 'times' is a list, the info will be made relative.

@@ -6244 +6242 @@
 number_of_points,
 description='Converted from URS mux2 format',
-sts_precision=num.Float32,
+sts_precision=netcdf_float32,
 verbose=False):
 """

@@ -6267 +6265 @@
 # This is being used to seperate one number from a list.
 # what it is actually doing is sorting lists from numeric arrays.
-if type(times) is list or type(times) is num.ArrayType:
+if type(times) is list or isinstance(times, num.ndarray):
 number_of_times = len(times)
 times = ensure_numeric(times)

@@ -6287 +6285 @@

 # Variable definitions
-outfile.createVariable('permutation', num.Int, ('number_of_points',))
+outfile.createVariable('permutation', netcdf_int, ('number_of_points',))
 outfile.createVariable('x', sts_precision, ('number_of_points',))
 outfile.createVariable('y', sts_precision, ('number_of_points',))

@@ -6302 +6300 @@

 # Doing sts_precision instead of Float gives cast errors.
-outfile.createVariable('time', num.Float, ('number_of_timesteps',))
+outfile.createVariable('time', netcdf_float, ('number_of_timesteps',))

 for q in Write_sts.sts_quantities:

@@ -6314 +6312 @@
 outfile.variables[q + Write_sts.RANGE][1] = -max_float # Max

-if type(times) is list or type(times) is num.ArrayType:
+if type(times) is list or isinstance(times, num.ndarray):
 outfile.variables['time'][:] = times #Store time relative

@@ -6423 +6421 @@
 # @param verbose True if this function is to be verbose.
 # @param **quant Extra keyword args.
-def store_quantities(self, outfile, sts_precision=num.Float32,
+def store_quantities(self, outfile, sts_precision=num.float32,
 slice_index=None, time=None,
 verbose=False, **quant):

@@ -6514 +6512 @@
 lonlatdep = p_array.array('f')
 lonlatdep.read(mux_file, columns * self.points_num)
-lonlatdep = num.array(lonlatdep, typecode=num.Float)
+lonlatdep = num.array(lonlatdep, dtype=num.float)
 lonlatdep = num.reshape(lonlatdep, (self.points_num, columns))
 self.lonlatdep = lonlatdep

@@ -6550 +6548 @@
 hz_p_array = p_array.array('f')
 hz_p_array.read(self.mux_file, self.points_num)
-hz_p = num.array(hz_p_array, typecode=num.Float)
+hz_p = num.array(hz_p_array, dtype=num.float)
 self.iter_time_step += 1

@@ -6695 +6693 @@
 # array to store data, number in there is to allow float...
 # i'm sure there is a better way!
-data = num.array([], typecode=num.Float)
+data = num.array([], dtype=num.float)
 data = num.resize(data, ((len(lines)-1), len(header_fields)))

@@ -6861 +6859 @@
 time += fid.starttime[0]

-# Get the variables as Numeric arrays
+# Get the variables as numeric arrays
 x = fid.variables['x'][:] # x-coordinates of nodes
 y = fid.variables['y'][:] # y-coordinates of nodes

@@ -6870 +6868 @@

 # Mesh (nodes (Mx2), triangles (Nx3))
-nodes = num.concatenate((x[:,num.NewAxis], y[:,num.NewAxis]), axis=1)
+nodes = num.concatenate((x[:,num.newaxis], y[:,num.newaxis]), axis=1)
 triangles = fid.variables['volumes'][:]

@@ -7293 +7291 @@

 # Get the relevant quantities (Convert from single precison)
-elevation = num.array(fid.variables['elevation'][:], num.Float)
-stage = num.array(fid.variables['stage'][:], num.Float)
+elevation = num.array(fid.variables['elevation'][:], num.float)
+stage = num.array(fid.variables['stage'][:], num.float)

 # Here's where one could convert nodal information to centroid

@@ -7311 +7309 @@
 # and call it here

-points = num.concatenate((x[:,num.NewAxis], y[:,num.NewAxis]), axis=1)
+points = num.concatenate((x[:,num.newaxis], y[:,num.newaxis]), axis=1)

 point_indices = inside_polygon(points, polygon)
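Finally, the Write_sww/Write_sts hunks replace identity checks against Numeric's ArrayType with isinstance checks against num.ndarray, which also accept ndarray subclasses. A minimal sketch of that pattern (the helper below is illustrative, not part of data_manager.py):

    import numpy as num

    def as_list(values):
        # Numeric: type(values) == num.ArrayType
        # numpy:   isinstance(values, num.ndarray)
        if isinstance(values, num.ndarray):
            return values.tolist()
        return list(values)

    assert as_list(num.arange(3)) == [0, 1, 2]
    assert as_list((4, 5, 6)) == [4, 5, 6]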