Changeset 3593
- Timestamp: Sep 14, 2006, 2:40:38 PM
- Location: anuga_core/source/anuga_parallel
- Files: 2 edited
anuga_core/source/anuga_parallel/parallel_api.py
r3588 → r3593 (new function distribute() added after the processor-information block):

  processor_name = pypar.Get_processor_name()
  print 'I am processor %d of %d on node %s' %(myid, numprocs, processor_name)
+
+
+ def distribute(domain):
+
+     if myid == 0:
+         #-------------------------------------------------------------------
+         # Distribute the domain
+         #-------------------------------------------------------------------
+
+         points, vertices, boundary, quantities,\
+                 ghost_recv_dict, full_send_dict \
+                 = distribute_mesh(domain)
+
+         print 'Communication done'
+
+     else:
+         # Read in the mesh partition that belongs to this
+         # processor (note that the information is in the
+         # correct form for the GA data structure)
+
+         points, vertices, boundary, quantities,\
+                 ghost_recv_dict, full_send_dict \
+                 = rec_submesh(0)
+
+     #------------------------------------------------------------------------
+     # Start the computations on each subpartition
+     #------------------------------------------------------------------------
+
+     # Build the domain for this processor
+     domain = Parallel_Domain(points, vertices, boundary,
+                              full_send_dict  = full_send_dict,
+                              ghost_recv_dict = ghost_recv_dict)
+
+     #------------------------------------------------------------------------
+     # Setup initial conditions
+     #------------------------------------------------------------------------
+     for q in quantities:
+         domain.set_quantity(q, quantities[q]) # Distribute all quantities
+
+     #------------------------------------------------------------------------
+     # Return parallel domain to all nodes
+     #------------------------------------------------------------------------
+     return domain
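The new helper collapses the partition/send/receive boilerplate that each script previously carried into a single collective call. A minimal sketch of the calling pattern, mirroring the updated test script in the next file (imports other than parallel_api are not shown in this changeset and are assumed to come from the usual ANUGA modules):

    from parallel_api import *   # provides distribute(), myid, numprocs

    # Every processor builds the same sequential domain...
    points, vertices, boundary = rectangular_cross(10, 10)
    domain = Domain(points, vertices, boundary)

    # ...then swaps it for its local piece.  The call is collective:
    # processor 0 partitions and sends submeshes, all others receive,
    # so every processor must reach this line.
    domain = distribute(domain)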
anuga_core/source/anuga_parallel/test_parallel_sw_runup.py
r3591 → r3593 (inline mesh distribution replaced by a call to distribute()):

  from parallel_api import *

- #------------------------------------------------------------------------------
- # Read in processor information
- #------------------------------------------------------------------------------
-
- numprocs = pypar.size()
- myid = pypar.rank()
- processor_name = pypar.Get_processor_name()
- print 'I am processor %d of %d on node %s' %(myid, numprocs, processor_name)
+ #--------------------------------------------------------------------------
+ # Setup computational domain
+ #--------------------------------------------------------------------------
+ points, vertices, boundary = rectangular_cross(10, 10) # Basic mesh
+ domain = Domain(points, vertices, boundary)            # Create domain
+ domain.set_name('runup')                               # Set sww filename

- #------------------------------------------------------------------------------
- # Initialise
- #------------------------------------------------------------------------------
+ #--------------------------------------------------------------------------
+ # Setup initial conditions
+ #--------------------------------------------------------------------------

- if myid == 0:
-     #--------------------------------------------------------------------------
-     # Setup computational domain
-     #--------------------------------------------------------------------------
+ def topography(x,y):
+     return -x/2                              # linear bed slope

-     points, vertices, boundary = rectangular_cross(10, 10) # Basic mesh
-
-     domain = Domain(points, vertices, boundary) # Create domain
+ domain.set_quantity('elevation', topography) # Use function for elevation
+ domain.set_quantity('friction', 0.1)         # Constant friction
+ domain.set_quantity('stage', -.4)            # Constant initial stage

-     # Unstructured mesh
-     #polygon = [[1,1],[0,1],[0,0],[1,0]]
-     #meshname = 'runup.msh'
-     #create_mesh_from_regions(polygon,
-     #                         boundary_tags={'top': [0],
-     #                                        'left': [1],
-     #                                        'bottom': [2],
-     #                                        'right': [3]},
-     #                         maximum_triangle_area=0.01,
-     #                         filename=meshname)
-     #domain = Domain(meshname, use_cache=True, verbose = True)
+ #--------------------------------------------------------------------------
+ # Create the parallel domain
+ #--------------------------------------------------------------------------
+ domain = distribute(domain)

-     domain.set_name('runup')                    # Set sww filename
-
-     #--------------------------------------------------------------------------
-     # Setup initial conditions
-     #--------------------------------------------------------------------------
-
-     def topography(x,y):
-         return -x/2                              # linear bed slope
-
-     domain.set_quantity('elevation', topography) # Use function for elevation
-     domain.set_quantity('friction', 0.1)         # Constant friction
-     domain.set_quantity('stage', -.4)            # Constant initial stage
-
-     #--------------------------------------------------------------------------
-     # Distribute the domain
-     #--------------------------------------------------------------------------
-
-     # Subdivide the mesh
-     print 'Subdivide mesh'
-     nodes, triangles, boundary, triangles_per_proc, quantities = \
-            pmesh_divide_metis(domain, numprocs)
-
-     # Build the mesh that should be assigned to each processor,
-     # this includes ghost nodes and the communication pattern
-     print 'Build submeshes'
-     submesh = build_submesh(nodes, triangles, boundary,\
-                             quantities, triangles_per_proc)
-
-     # Send the mesh partition to the appropriate processor
-     print 'Distribute submeshes'
-     for p in range(1, numprocs):
-         send_submesh(submesh, triangles_per_proc, p)
-
-     # Build the local mesh for processor 0
-     points, vertices, boundary, quantities, ghost_recv_dict, full_send_dict = \
-             extract_hostmesh(submesh, triangles_per_proc)
-
-     print 'Communication done'
-
- else:
-     # Read in the mesh partition that belongs to this
-     # processor (note that the information is in the
-     # correct form for the GA data structure)
-
-     points, vertices, boundary, quantities, ghost_recv_dict, full_send_dict \
-             = rec_submesh(0)
-
- #------------------------------------------------------------------------------
- # Start the computations on each subpartition
- #------------------------------------------------------------------------------
-
- # Build the domain for this processor
- domain = Parallel_Domain(points, vertices, boundary,
-                          full_send_dict  = full_send_dict,
-                          ghost_recv_dict = ghost_recv_dict)
+ # TODO: Communicate all attributes of domain including boundary conditions

  # Name and dir, etc currently has to be set here as they are not
  ...
  domain.set_name('runup')                    # Set sww filename

- #------------------------------------------------------------------------------
- # Setup initial conditions
- #------------------------------------------------------------------------------
- for q in quantities:
-     domain.set_quantity(q, quantities[q]) # Distribute all quantities

  ...

  # Associate boundary tags with boundary objects
- #domain.set_boundary({'left': Br, 'right': Bd, 'top': Br, 'bottom': Br,
- #                     'ghost': None, 'exterior': Bd})
  domain.set_boundary({'left': Br, 'right': Bd, 'top': Br, 'bottom': Br,
                       'ghost': None})
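Since parallel_api sets up pypar at import time (rank, size, processor name), the refactored test remains an SPMD script that must be started on all processors at once. With a typical pypar/MPI installation the invocation would look something like the following (the launcher name and process count are site-specific assumptions, not part of this changeset):

    mpirun -np 4 python test_parallel_sw_runup.py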