Changeset 2769
- Timestamp: Apr 27, 2006, 8:46:30 AM
- Location: inundation/parallel
- Files: 3 added, 5 edited
Legend:
- Unmodified lines are shown without a marker
- Added lines are prefixed with +
- Removed lines are prefixed with -
inundation/parallel/pmesh_divide.py
Changes from r2193 to r2769:

  #########################################################
  #
- # Do a simple coordinate based division of the grid
- #
- # *) n_x is the number of subdivisions in the x direction
- # *) n_y is the number of subdivisions in the y direction
+ # Divide the mesh using a call to metis, through pymetis.
  #
  # -------------------------------------------------------
  …
  #
  #########################################################
-
- def pmesh_divide(domain, n_x = 1, n_y = 1):
-
-     # Find the bounding box
-
-     x_coord_min = domain.xy_extent[0]
-     x_coord_max = domain.xy_extent[2]
-     y_coord_min = domain.xy_extent[1]
-     y_coord_max = domain.xy_extent[3]
-
-     # Find the size of each sub-box
-
-     x_div = (x_coord_max-x_coord_min)/n_x
-     y_div = (y_coord_max-y_coord_min)/n_y
-
-     # Initialise the lists
-
-     tri_list = []
-     triangles_per_proc = []
-     proc_sum = []
-     for i in range(n_x*n_y):
-         tri_list.append([])
-         triangles_per_proc.append([])
-         proc_sum.append([])
-         tri_list[i] = []
-
-     # Subdivide the triangles depending on which sub-box they sit
-     # in (a triangle sits in the sub-box if its first vertex sits
-     # in that sub-box)
-
-     tri_index = {}
-     N = domain.number_of_elements
-     for i in range(N):
-         t = domain.triangles[i]
-
-         x_coord = domain.centroid_coordinates[i][0]
-         bin_x = int(floor((x_coord-x_coord_min)/x_div))
-         if (bin_x == n_x):
-             bin_x = n_x-1
-
-         y_coord = domain.centroid_coordinates[i][1]
-         bin_y = int(floor((y_coord-y_coord_min)/y_div))
-         if (bin_y == n_y):
-             bin_y = n_y-1
-
-         bin = bin_y*n_x + bin_x
-         tri_list[bin].append(t)
-         tri_index[i] = ([bin, len(tri_list[bin])-1])
-
-     # Find the number of triangles per processor and order the
-     # triangle list so that all of the triangles belonging to
-     # processor i are listed before those belonging to processor
-     # i+1
-
-     triangles = []
-     for i in range(n_x*n_y):
-         triangles_per_proc[i] = len(tri_list[i])
-         for t in tri_list[i]:
-             triangles.append(t)
-
-     # The boundary labels have to be changed in accordance with the
-     # new triangle ordering; proc_sum and tri_index help with this
-
-     proc_sum[0] = 0
-     for i in range(n_x*n_y-1):
-         proc_sum[i+1] = proc_sum[i]+triangles_per_proc[i]
-
-     # Relabel the boundary elements to fit in with the new triangle
-     # ordering
-
-     boundary = {}
-     for b in domain.boundary:
-         t = tri_index[b[0]]
-         boundary[proc_sum[t[0]]+t[1], b[1]] = domain.boundary[b]
-
-     quantities = reorder(domain.quantities, tri_index, proc_sum)
-
-     # Extract the node list
-
-     nodes = domain.coordinates.copy()
-     ttriangles = zeros((len(triangles), 3), Int)
-     for i in range(len(triangles)):
-         ttriangles[i] = triangles[i]
-
-     return nodes, ttriangles, boundary, triangles_per_proc, quantities
-
-
- #########################################################
- #
- # Do a coordinate based division of the grid using the
- # centroid coordinate
- #
- # *) n_x is the number of subdivisions in the x direction
- # *) n_y is the number of subdivisions in the y direction
- #
- # -------------------------------------------------------
- #
- # *) The nodes, triangles, boundary, and quantities are
- #    returned. triangles_per_proc defines the subdivision.
- #    The first triangles_per_proc[0] triangles are assigned
- #    to processor 0, the next triangles_per_proc[1] are
- #    assigned to processor 1 etc. The boundary and quantities
- #    are ordered the same way as the triangles
- #
- #########################################################
- def pmesh_divide_steve(domain, n_x = 1, n_y = 1):
-
-     # Find the bounding box
-     x_coord_min = domain.xy_extent[0]
-     x_coord_max = domain.xy_extent[2]
-     y_coord_min = domain.xy_extent[1]
-     y_coord_max = domain.xy_extent[3]
-
-     # Find the size of each sub-box
-
-     x_div = (x_coord_max-x_coord_min)/n_x
-     y_div = (y_coord_max-y_coord_min)/n_y
-
-     # Initialise the lists
-
-     tri_list = []
-     triangles_per_proc = []
-     proc_sum = []
-     for i in range(n_x*n_y):
-         tri_list.append([])
-         triangles_per_proc.append([])
-         proc_sum.append([])
-         tri_list[i] = []
-
-     # Subdivide the triangles depending on which sub-box they sit
-     # in (a triangle sits in the sub-box if its first vertex sits
-     # in that sub-box)
-
-     tri_index = {}
-     N = domain.number_of_elements
-
-     # Sort by x coordinate of centroid
-
-     sort_order = argsort(argsort(domain.centroid_coordinates[:,0]))
-
-     x_div = float(N)/n_x
-
-     for i in range(N):
-         t = domain.triangles[i]
-
-         bin = int(floor(sort_order[i]/x_div))
-         if (bin == n_x):
-             bin = n_x-1
-
-         tri_list[bin].append(t)
-         tri_index[i] = ([bin, len(tri_list[bin])-1])
-
-     # Find the number of triangles per processor and order the
-     # triangle list so that all of the triangles belonging to
-     # processor i are listed before those belonging to processor
-     # i+1
-
-     triangles = []
-     for i in range(n_x*n_y):
-         triangles_per_proc[i] = len(tri_list[i])
-         for t in tri_list[i]:
-             triangles.append(t)
-
-     # The boundary labels have to be changed in accordance with the
-     # new triangle ordering; proc_sum and tri_index help with this
-
-     proc_sum[0] = 0
-     for i in range(n_x*n_y-1):
-         proc_sum[i+1] = proc_sum[i]+triangles_per_proc[i]
-
-     # Relabel the boundary elements to fit in with the new triangle
-     # ordering
-
-     boundary = {}
-     for b in domain.boundary:
-         t = tri_index[b[0]]
-         boundary[proc_sum[t[0]]+t[1], b[1]] = domain.boundary[b]
-
-     quantities = reorder(domain.quantities, tri_index, proc_sum)
-
-     # Extract the node list
-
-     nodes = domain.coordinates.copy()
-
-     # Convert the triangle datastructure to be an array type,
-     # this helps with the communication
-
-     ttriangles = zeros((len(triangles), 3), Int)
-     for i in range(len(triangles)):
-         ttriangles[i] = triangles[i]
-
-     return nodes, ttriangles, boundary, triangles_per_proc, quantities
-
-
- ###
- #
- # Divide the mesh using a call to metis, through pymetis.
-
  path.append('..' + sep + 'pymetis')
  …
  tri_list[epart[i]].append(domain.triangles[i])
  tri_index[i] = ([epart[i], len(tri_list[epart[i]]) - 1])
-
- #    print tri_list
- #    print triangles_per_proc

  # Order the triangle list so that all of the triangles belonging
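The ordering convention described in the removed comments still holds for the metis-based division: the first triangles_per_proc[0] triangles in the reordered list belong to processor 0, the next triangles_per_proc[1] to processor 1, and so on, with proc_sum holding the cumulative offsets used to relabel boundary entries. A minimal illustrative sketch of that bookkeeping (not part of the changeset; the values below are made up):

    # Illustrative sketch of the triangles_per_proc / proc_sum bookkeeping
    # used when triangles are reordered by processor. Names mirror those
    # in pmesh_divide.py; the data is hypothetical.

    triangles_per_proc = [3, 2, 4]          # hypothetical partition sizes

    # proc_sum[p] is the index of the first triangle owned by processor p
    proc_sum = [0] * len(triangles_per_proc)
    for p in range(len(triangles_per_proc) - 1):
        proc_sum[p + 1] = proc_sum[p] + triangles_per_proc[p]

    # tri_index maps an old (global) triangle id to [processor, local index];
    # the id after reordering is proc_sum[processor] + local index.
    tri_index = {0: [1, 0], 1: [0, 2], 2: [2, 3]}   # made-up entries

    for old_id in tri_index:
        p, local = tri_index[old_id]
        new_id = proc_sum[p] + local
        print('triangle %d -> processor %d, new global id %d' % (old_id, p, new_id))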
inundation/parallel/run_parallel_advection.py
Changes from r2654 to r2769:

  #!/usr/bin/env python
+ ###
+ #########################################################
+ #
+ # Main file for parallel mesh testing. Runs an advection
+ # flow simulation using a rectangular mesh
+ #
+ #
+ # Authors: Steve Roberts June 2005
+ # Modified by Linda Stals April 2006
+ #
+ #
+ #########################################################
+

  import sys
  …
  sys.path.append('..'+sep+'pyvolution')

-
- #========================================================================
+ # Parallel communication routines

  import pypar

- from print_stats import print_test_stats, build_full_flag
+ # Mesh partition routines

  from parallel_meshes import parallel_rectangle

+ # Parallel Domain
+
  from parallel_advection import Parallel_Domain
  from parallel_advection import Transmissive_boundary

- # Define the initial conditions
-
+ ############################
+ # Set the initial conditions
+ ############################
  class Set_Stage:
      """Set an initial condition with constant water height, for x<x0
  …
          return self.h*((x>self.x0)&(x<self.x1))

- # Get information about the parallel set-up
+ ###############################
+ # Read in processor information
+ ###############################

  numprocs = pypar.size()
  …
  M = 20

- # Build 1*1 domain containing M*N nodes split over numprocs * 1 processors
+ #######################
+ # Partition the domain
+ #######################
+
+ # Build a unit mesh, subdivide it over numprocs processors with each
+ # submesh containing M*N nodes

  points, vertices, boundary, full_send_dict, ghost_recv_dict = \
      parallel_rectangle(N, M, len1_g=1.0)
+
+ ###########################################
+ # Start the computations on each subpartition
+ ###########################################

  # Create advection domain with direction (1,-1)
  …
  domain = Parallel_Domain(points, vertices, boundary,
                           full_send_dict, ghost_recv_dict, velocity=[1.0, 0.0])
-
- # Make a note of which triangles are full and which are ghost
-
- tri_full_flag = build_full_flag(domain, ghost_recv_dict)

  # Turn on the visualisation
  …
  t0 = time.time()

- # Start the parallel computations
+ # Start the evolve computations

  for t in domain.evolve(yieldstep = 0.1, finaltime = 3.0):
      if myid == 0:
          domain.write_time()
-     print_test_stats(domain, tri_full_flag)

  # Output some computation statistics
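The script relies on pypar for the parallel set-up: every processor evolves its own piece of the rectangular mesh, and only processor 0 writes progress output. A minimal sketch of that rank-aware pattern (illustrative only; it assumes pypar is installed and the script is launched under MPI, for example with mpirun -np 4 python ...):

    # Illustrative sketch only: the rank-aware reporting pattern used
    # around domain.evolve() in run_parallel_advection.py.

    import pypar

    numprocs = pypar.size()      # number of processors
    myid = pypar.rank()          # rank of this processor

    for step in range(3):        # stand-in for domain.evolve(...)
        # every processor advances its own submesh here;
        # only processor 0 reports progress
        if myid == 0:
            print('step %d done on %d processors' % (step, numprocs))

    pypar.finalize()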
inundation/parallel/run_parallel_merimbula.py
Changes from r2654 to r2769:

  #########################################################
  #
- # Main file for parallel mesh testing.
+ # Main file for parallel mesh testing. Runs an advection
+ # flow simulation using a rectangular mesh
  #
  # This is a modification of the run_parallel_advection.py
- # file.
+ # file
  #
  #
  …
  # the host and processors
  #
- # *) Things still to do:
- #  +) Overlap the communication and computation: The
- #     communication routines in build_commun.py should be
- #     interspersed in the build_submesh.py and build_local.py
- #     files. This will overlap the communication and
- #     computation and will be far more efficient. This should
- #     be done after more testing and there is more confidence in
- #     the subpartitioning.
- #  +) Much more testing especially with large numbers of
- #     processors.
  # Authors: Linda Stals, Steve Roberts and Matthew Hardy,
  # June 2005
  …
  from os import sep
  sys.path.append('..'+sep+'pyvolution')
+
+ # Numeric arrays

  from Numeric import array, zeros, Float
  …
  # subdivide the mesh
  rect = array(domain_full.xy_extent, Float)
-
- rect = array(rect, Float)
+ # rect = array(rect, Float)

  submesh = build_submesh(nodes, triangles, boundary, quantities, \
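As in the other merimbula scripts, the host processor builds the full domain, partitions it with build_submesh, and hands each piece to the matching processor, which receives it with rec_submesh. A minimal sketch of that hand-out pattern (illustrative only; a plain dictionary stands in for the real submesh structure, and only the communication shape is shown):

    # Illustrative sketch only: the host/worker hand-out used by the
    # merimbula scripts. A dictionary stands in for the structures
    # produced by build_submesh().

    import pypar

    myid = pypar.rank()
    numprocs = pypar.size()

    if myid == 0:
        # Host: pretend the mesh has been partitioned into numprocs pieces
        pieces = [{'processor': p, 'triangles': []} for p in range(numprocs)]
        for p in range(1, numprocs):
            pypar.send(pieces[p], p)          # hand piece p to processor p
        local_piece = pieces[0]               # the host keeps its own piece
    else:
        local_piece = pypar.receive(0)        # workers receive from the host

    print('P%d holds the piece for processor %d' % (myid, local_piece['processor']))
    pypar.finalize()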
inundation/parallel/run_parallel_sw_merimbula_metis.py
Changes from r2738 to r2769:

  #!/usr/bin/env python
  ###
- # Same as run_parallel_sw_merimbula.py, but uses pmesh_divide_metis
- # to partition the mesh.
  #########################################################
  #
- # Main file for parallel mesh testing.
+ # Main file for parallel mesh testing. Runs a shallow
+ # water simulation using the merimbula mesh
  #
- # This is a modification of the run_parallel_advection.py
- # file.
  #
  #
  # *) The (new) files that have been added to manage the
  #    grid partitioning are
- #  +) pmesh_divide.py: subdivide a pmesh
+ #  +) pmesh_divide_metis.py: subdivide a pmesh
  #  +) build_submesh.py: build the submeshes on the host
  #     processor.
  …
  #     the host and processors
  #
- # *) Things still to do:
- #  +) Overlap the communication and computation: The
- #     communication routines in build_commun.py should be
- #     interspersed in the build_submesh.py and build_local.py
- #     files. This will overlap the communication and
- #     computation and will be far more efficient. This should
- #     be done after more testing and there is more confidence in
- #     the subpartitioning.
- #  +) Much more testing especially with large numbers of
- #     processors.
  # Authors: Linda Stals, Steve Roberts and Matthew Hardy,
  # June 2005
  …
  import time

-
  from os import sep
  sys.path.append('..'+sep+'pyvolution')
  …
  from Numeric import array, zeros, Float

- # Print debugging information
-
- from print_stats import print_test_stats, build_full_flag
-
  # pmesh

  …
  from parallel_shallow_water import Parallel_Domain
  from pmesh2domain import pmesh_to_domain_instance
-
- # Reuse previous mesh import
-
- from caching import cache

  # Mesh partition routines
  …
  # Read in the test files

- # filename = 'test-100.tsh'
  filename = 'merimbula_10785_1.tsh'

  # Build the whole domain

- #domain_full = pmesh_to_domain_instance(filename, Domain)
+ domain_full = pmesh_to_domain_instance(filename, Domain)

- domain_full = cache(pmesh_to_domain_instance,
-                     (filename, Domain),
-                     dependencies = [filename])
+ # Define the domain boundaries for visualisation

  rect = array(domain_full.xy_extent, Float)
  …
  # Initialise the wave

- # domain_full.set_quantity('stage', Set_Stage(200.0,300.0,1.0))
  domain_full.set_quantity('stage', Set_Stage(756000.0,756500.0,2.0))

- # Subdivide the domain
-
- # Note the different arguments compared with pmesh_divide,
- # pmesh_divide_steve etc.
+ # Subdivide the mesh

  nodes, triangles, boundary, triangles_per_proc, quantities = \
      pmesh_divide_metis(domain_full, numprocs)

- rect = array(domain_full.xy_extent, Float)
-
+ # Build the mesh that should be assigned to each processor,
+ # this includes ghost nodes and the communication pattern
+
  submesh = build_submesh(nodes, triangles, boundary,\
                          quantities, triangles_per_proc)
  …
      build_local_mesh(hostmesh, 0, triangles_per_proc[0], numprocs)

- # Read in the mesh partition that belongs to this
- # processor (note that the information is in the
- # correct form for the GA data structure
+ else:
+
+     # Read in the mesh partition that belongs to this
+     # processor (note that the information is in the
+     # correct form for the GA data structure)

- else:
      points, vertices, boundary, quantities, ghost_recv_dict, full_send_dict \
          = rec_submesh(0)
  …
  ###########################################

- #if myid == 0:
- #    print 'ghost'
- #    print ghost_recv_dict
- #processor_name
- #if myid == 0:
- #    print 'full'
- #    print full_send_dict
-
  # The visualiser needs to know the size of the whole domain

  pypar.broadcast(rect,0)
+
+ # Build the domain for this processor

  domain = Parallel_Domain(points, vertices, boundary,
  …
                           ghost_recv_dict = ghost_recv_dict)

- # Make a note of which triangles are full and which are ghost
+ # Visualise the domain

- tri_full_flag = build_full_flag(domain, ghost_recv_dict)
-
- #try:
- #    domain.initialise_visualiser(rect=rect)
- #    domain.visualiser.updating['stage'] = True
- #    domain.visualiser.setup['elevation'] = True
- #    domain.visualiser.qcolor['stage'] = (0.0, 0.0, 0.8)
- #    domain.visualiser.scale_z['stage'] = 250
- #    domain.visualiser.scale_z['elevation'] = 250
- #except:
- #    print 'No visualiser'
+ try:
+     domain.initialise_visualiser(rect=rect)
+     domain.visualiser.scale_z['stage'] = 0.2
+     domain.visualiser.scale_z['elevation'] = 0.05
+ except:
+     print 'No visualiser'


  domain.default_order = 1

- #Boundaries
+ # Define the boundaries, including the ghost boundary
+
  from parallel_shallow_water import Transmissive_boundary, Reflective_boundary

  T = Transmissive_boundary(domain)
  R = Reflective_boundary(domain)
- domain.set_boundary( {'outflow': R, 'inflow': R, 'inner':R, 'exterior': R, 'open':R} )
+ domain.set_boundary( {'outflow': R, 'inflow': R, 'inner':R, 'exterior': R, \
+                       'open':R, 'ghost':None} )

+
+ # Set the initial quantities

  domain.set_quantity('stage', quantities['stage'])
  …

  domain.store = False
- #domain.filename = 'merimbula-%d' %domain.processor

- # ---------
- # Evolution
+ # Set the number of time steps, as well as the start and end time
+
  t0 = time.time()
-
- print 'Processor %d on %s: No of elements %d'%(domain.processor,processor_name,domain.number_of_elements)
- yieldstep = 0.05
- finaltime = 500.0
-
  yieldstep = 1
  finaltime = 90

- #yieldstep = 1
- #finaltime = 1
- #processor_name
- #for t in domain.evolve(yieldstep = yieldstep, finaltime = finaltime):
- #    if myid == 0:
- #        domain.write_time()
- #print 'Processor %d, Integral of stage %d'%\
- #      (domain.processor,domain.quantities['stage'].get_integral())
- #    print_test_stats(domain, tri_full_flag)

+ # Start the evolve calculations

- # Profiling
- #import profile
- #profiler = profile.Profile()
- #result.dump_stats("profile." + str(numprocs) + "." + str(myid) + ".dat")
+ for t in domain.evolve(yieldstep = yieldstep, finaltime = finaltime):
+     if myid == 0:
+         domain.write_time()

- #New hotshot profiling
- import hotshot
- profiler = hotshot.Profile("hotshot." + str(numprocs) + "." + str(myid) + ".prof")
- s = '''for t in domain.evolve(yieldstep = yieldstep, finaltime = finaltime):
-   if myid == 0:
-     domain.write_time()
-   print_test_stats(domain, tri_full_flag)
-
- '''
- result = profiler.runctx(s, globals(), locals())
- profiler.close()
-
- #print 'P%d: That took %.2f seconds' %(myid, time.time()-t0)
- #print 'P%d: Communication time %.2f seconds' %(myid, domain.communication_time)
- #print 'P%d: Reduction Communication time %.2f seconds' %(myid, domain.communication_reduce_time)
- #print 'P%d: Broadcast time %.2f seconds' %(myid, domain.communication_broadcast_time)
-
  # Print some timing statistics

  if myid == 0:
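The substantive change to the boundary set-up is the extra 'ghost': None entry: segments tagged 'ghost' are given no boundary object, since the corresponding triangles are updated through the full_send/ghost_recv communication rather than by a boundary condition. A small illustrative sketch of that dispatch idea (not ANUGA code; the data is made up):

    # Illustrative sketch only: why 'ghost' can map to None in
    # set_boundary(). Tags without a boundary object are skipped.

    def reflective(segment):
        return 'reflect %s' % segment

    boundary_map = {'outflow': reflective, 'inflow': reflective,
                    'open': reflective, 'ghost': None}

    segments = [('s1', 'inflow'), ('s2', 'ghost'), ('s3', 'open')]

    for name, tag in segments:
        bc = boundary_map[tag]
        if bc is None:
            continue             # ghost segment: updated by communication instead
        print(bc(name))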
inundation/parallel/run_parallel_sw_rectangle.py
Changes from r2761 to r2769:



+ <<<<<<< .mine
+ =======
  #from pmesh_divide import pmesh_divide, pmesh_divide_steve

+ >>>>>>> .r2768
  # read in the processor information
