Changeset 2152 for inundation/parallel
- Timestamp: Dec 16, 2005, 9:59:58 AM
- Location: inundation/parallel
- Files: 5 edited
inundation/parallel/run_advection.py
--- r1639
+++ r2152
…
 from mesh_factory import rectangular
 
-points, vertices, boundary = rectangular(60, 60)
+#points, vertices, boundary = rectangular(60, 60)
+points, vertices, boundary = rectangular(10, 10)
 
 #Create advection domain with direction (1,-1)
…
inundation/parallel/run_parallel_merimbula.py
--- r2130
+++ r2152
…
 # file.
 #
-# *) The test files currently avaliable are of the form
-#    test*.out, eg test_5l_4c.out. The term infront of the l
-#    corresponds to the number of levels of refinement
-#    required to build the grid, i.e. a higher number
-#    corresponds to a finer grid. The term infront of the c
-#    corresponds to the number of processors.
 #
 # *) The (new) files that have been added to manage the
…
inundation/parallel/run_parallel_sw_merimbula.py
--- r2131
+++ r2152
…
 # file.
 #
-# *) The test files currently avaliable are of the form
-#    test*.out, eg test_5l_4c.out. The term infront of the l
-#    corresponds to the number of levels of refinement
-#    required to build the grid, i.e. a higher number
-#    corresponds to a finer grid. The term infront of the c
-#    corresponds to the number of processors.
 #
 # *) The (new) files that have been added to manage the
…
 #
 #########################################################
+
 import sys
 import pypar   # The Python-MPI interface
 import time
 
-
 from os import sep
 sys.path.append('..'+sep+'pyvolution')
 
-from Numeric import array
+# Numeric arrays
+
+from Numeric import array, zeros, Float
+
 # pmesh
-
-#from shallow_water import Domain
 
 from shallow_water import Domain
 from parallel_shallow_water import Parallel_Domain
-
-# mesh partition routines
+from pmesh2domain import pmesh_to_domain_instance
+
+# Reuse previous mesh import
+
+from caching import cache
+
+# Mesh partition routines
 
 from pmesh_divide import pmesh_divide, pmesh_divide_steve
-from build_submesh import *
-from build_local import *
-from build_commun import *
-from pmesh2domain import pmesh_to_domain_instance
-
-# read in the processor information
+from build_submesh import build_submesh, extract_hostmesh
+from build_local import build_local_mesh
+from build_commun import send_submesh, rec_submesh
+
+
+###############################
+# Read in processor information
+###############################
 
 numprocs = pypar.size()
…
 processor_name = pypar.Get_processor_name()
 
-#-------
-# Domain
+############################
+# Set the initial conditions
+############################
+
 rect = zeros( 4, Float) # Buffer for results
 
…
         return self.h*((x>self.x0)&(x<self.x1))
 
+#######################
+# Partition the domain
+#######################
 
 if myid == 0:
 
-    # read in the test files
+    # Read in the test files
 
 #    filename = 'test-100.tsh'
…
         print "WARNING: number of subboxes is not equal to the number of proc"
 
-    domain_full = pmesh_to_domain_instance(filename, Domain)
-
+    # Build the whole domain
+
+#    domain_full = pmesh_to_domain_instance(filename, Domain)
+
+    domain_full = cache(pmesh_to_domain_instance,
+                        (filename, Domain),
+                        dependencies = [filename])
+
+    rect = array(domain_full.xy_extent, Float)
+
+    # Initialise the wave
+
 #    domain_full.set_quantity('stage', Set_Stage(200.0,300.0,1.0))
     domain_full.set_quantity('stage', Set_Stage(756000.0,756500.0,2.0))
 
+    # Subdivide the domain
+
     nodes, triangles, boundary, triangles_per_proc, quantities = \
             pmesh_divide_steve(domain_full, nx, ny)
 
-    rect = array(domain_full.xy_extent, Float)
-
     submesh = build_submesh(nodes, triangles, boundary,\
                             quantities, triangles_per_proc)
 
-    # send the mesh partition to the appropriate processor
+    # Send the mesh partition to the appropriate processor
 
     for p in range(1, numprocs):
         send_submesh(submesh, triangles_per_proc, p)
 
+    # Build the local mesh for processor 0
+
     hostmesh = extract_hostmesh(submesh)
-
-    points, vertices, boundary, quantities, ghost_recv_dict, full_send_dict = \
-            build_local_mesh(hostmesh, 0, triangles_per_proc[0], numprocs)
-
-# read in the mesh partition that belongs to this
+    points, vertices, boundary, quantities, ghost_recv_dict, \
+            full_send_dict = build_local_mesh(hostmesh, 0, \
+                                              triangles_per_proc[0], \
+                                              numprocs)
+
+# Read in the mesh partition that belongs to this
 # processor (note that the information is in the
 # correct form for the GA data structure
 
 else:
-    points, vertices, boundary, quantities, ghost_recv_dict, full_send_dict \
-            = rec_submesh(0)
-
+    points, vertices, boundary, quantities, ghost_recv_dict, \
+            full_send_dict = rec_submesh(0)
+
+
+###########################################
+# Start the computations on each subpartion
+###########################################
 
 ## ########### start profile testing
…
 ######## end profile testing
 
 
+# The visualiser needs to know the size of the whole domain
+
 pypar.broadcast(rect,0)
-#print rect
+
 
 domain = Parallel_Domain(points, vertices, boundary,
                          full_send_dict  = full_send_dict,
                          ghost_recv_dict = ghost_recv_dict)
-
 
 try:
…
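The main functional change in this file (and in the metis variant below) is swapping the direct pmesh_to_domain_instance call for the cache wrapper, so the expensive .tsh import is only performed once and reused on later runs. A minimal sketch of that pattern, using the cache(func, args, dependencies=...) call exactly as it appears in the diff above; the filename is the mesh the script already loads:

    # Sketch of the caching pattern introduced in this changeset.
    from caching import cache
    from pmesh2domain import pmesh_to_domain_instance
    from shallow_water import Domain

    filename = 'merimbula_10785_1.tsh'

    # First run: import the .tsh mesh and store the resulting Domain.
    # Later runs: reuse the stored result while the mesh file is unchanged,
    # because the mesh file is listed as a dependency.
    domain_full = cache(pmesh_to_domain_instance,
                        (filename, Domain),
                        dependencies = [filename])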
inundation/parallel/run_parallel_sw_merimbula_metis.py
--- r2130
+++ r2152
…
 # file.
 #
-# *) The test files currently avaliable are of the form
-#    test*.out, eg test_5l_4c.out. The term infront of the l
-#    corresponds to the number of levels of refinement
-#    required to build the grid, i.e. a higher number
-#    corresponds to a finer grid. The term infront of the c
-#    corresponds to the number of processors.
 #
 # *) The (new) files that have been added to manage the
…
 sys.path.append('..'+sep+'pyvolution')
 
-from Numeric import array
+# Numeric arrays
+
+from Numeric import array, zeros, Float
+
 # pmesh
-
-#from shallow_water import Domain
 
 from shallow_water import Domain
 from parallel_shallow_water import Parallel_Domain
-
-# mesh partition routines
+from pmesh2domain import pmesh_to_domain_instance
+
+# Reuse previous mesh import
+
+from caching import cache
+
+# Mesh partition routines
 
 from pmesh_divide import pmesh_divide_metis
-from build_submesh import *
-from build_local import *
-from build_commun import *
-from pmesh2domain import pmesh_to_domain_instance
-
-# read in the processor information
+from build_submesh import build_submesh, extract_hostmesh
+from build_local import build_local_mesh
+from build_commun import send_submesh, rec_submesh
+
+###############################
+# Read in processor information
+###############################
 
 numprocs = pypar.size()
…
 processor_name = pypar.Get_processor_name()
 
-#-------
-# Domain
+############################
+# Set the initial conditions
+############################
+
 rect = zeros( 4, Float) # Buffer for results
 
…
         return self.h*((x>self.x0)&(x<self.x1))
 
+#######################
+# Partition the domain
+#######################
 
 if myid == 0:
 
-    # read in the test files
+    # Read in the test files
 
 #    filename = 'test-100.tsh'
     filename = 'merimbula_10785_1.tsh'
 
-    domain_full = pmesh_to_domain_instance(filename, Domain)
+    # Build the whole domain
+
+#    domain_full = pmesh_to_domain_instance(filename, Domain)
+
+    domain_full = cache(pmesh_to_domain_instance,
+                        (filename, Domain),
+                        dependencies = [filename])
+
+    rect = array(domain_full.xy_extent, Float)
+
+    # Initialise the wave
 
 #    domain_full.set_quantity('stage', Set_Stage(200.0,300.0,1.0))
     domain_full.set_quantity('stage', Set_Stage(756000.0,756500.0,2.0))
+
+    # Subdivide the domain
 
     # Note the different arguments compared with pmesh_divide,
…
                             quantities, triangles_per_proc)
 
-    # send the mesh partition to the appropriate processor
+    # Send the mesh partition to the appropriate processor
 
     for p in range(1, numprocs):
         send_submesh(submesh, triangles_per_proc, p)
+
+    # Build the local mesh for processor 0
 
     hostmesh = extract_hostmesh(submesh)
…
         build_local_mesh(hostmesh, 0, triangles_per_proc[0], numprocs)
 
-# read in the mesh partition that belongs to this
+# Read in the mesh partition that belongs to this
 # processor (note that the information is in the
 # correct form for the GA data structure
…
     points, vertices, boundary, quantities, ghost_recv_dict, full_send_dict \
         = rec_submesh(0)
+
+
+###########################################
+# Start the computations on each subpartion
+###########################################
 
 #if myid == 0:
…
 #    print full_send_dict
 
+# The visualiser needs to know the size of the whole domain
 
 pypar.broadcast(rect,0)
-#print rect
 
 domain = Parallel_Domain(points, vertices, boundary,
…
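Both merimbula drivers now share the same host/worker structure around the partitioning step, as the diffs above show. A condensed sketch of that flow, restricted to the routines imported in this changeset (build_submesh, send_submesh, extract_hostmesh, build_local_mesh, rec_submesh) with the argument order used above; it assumes the host has already subdivided the full domain (via pmesh_divide_steve or pmesh_divide_metis) into nodes, triangles, boundary, quantities and triangles_per_proc:

    # Condensed sketch of the partition-and-distribute structure; not a
    # standalone script (the subdivision results come from the host step).
    if myid == 0:
        # Host: package one submesh per processor.
        submesh = build_submesh(nodes, triangles, boundary,
                                quantities, triangles_per_proc)

        # Ship every partition except partition 0 to its processor.
        for p in range(1, numprocs):
            send_submesh(submesh, triangles_per_proc, p)

        # Keep partition 0 and convert it to the local (GA) mesh form.
        hostmesh = extract_hostmesh(submesh)
        points, vertices, boundary, quantities, ghost_recv_dict, \
            full_send_dict = build_local_mesh(hostmesh, 0,
                                              triangles_per_proc[0], numprocs)
    else:
        # Workers: receive their partition, already in the local mesh form.
        points, vertices, boundary, quantities, ghost_recv_dict, \
            full_send_dict = rec_submesh(0)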
inundation/parallel/run_parallel_sw_rectangle.py
--- r2130
+++ r2152
-#!/usr/bin/env /python
+#!/usr/bin/env python
 #########################################################
 #
…
 # file.
 #
-# *) The test files currently avaliable are of the form
-#    test*.out, eg test_5l_4c.out. The term infront of the l
-#    corresponds to the number of levels of refinement
-#    required to build the grid, i.e. a higher number
-#    corresponds to a finer grid. The term infront of the c
-#    corresponds to the number of processors.
 #
-# *) The (new) files that have been added to manage the
-#    grid partitioning are
-#       +) pmesh_divide.py: subdivide a pmesh
-#       +) build_submesh.py: build the submeshes on the host
-#          processor.
-#       +) build_local.py: build the GA mesh datastructure
-#          on each processor.
-#       +) build_commun.py: handle the communication between
-#          the host and processors
-#
-# *) Things still to do:
-#       +) Overlap the communication and computation: The
-#          communication routines in build_commun.py should be
-#          interdispersed in the build_submesh.py and build_local.py
-#          files. This will overlap the communication and
-#          computation and will be far more efficient. This should
-#          be done after more testing and there more confidence in
-#          the subpartioning.
-#       +) Much more testing especially with large numbers of
-#          processors.
 # Authors: Linda Stals, Steve Roberts and Matthew Hardy,
 #          June 2005
…
 
 from pmesh_divide import pmesh_divide, pmesh_divide_steve
-from build_submesh import *
-from build_local import *
-from build_commun import *
-from pmesh2domain import pmesh_to_domain_instance
 
 # read in the processor information
…
 myid = pypar.rank()
 processor_name = pypar.Get_processor_name()
-
-
-
-
 
 M = 20
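All of the run_parallel_* drivers touched here share the same pypar preamble before doing any work. A minimal, self-contained sketch of that preamble; the report line is an illustrative addition rather than part of the changeset, and the script is assumed to be launched under mpirun with pypar installed:

    # Minimal pypar skeleton shared by the run_parallel_* scripts.
    import pypar   # The Python-MPI interface

    numprocs = pypar.size()                       # total number of processors
    myid = pypar.rank()                           # this processor (0 is host)
    processor_name = pypar.Get_processor_name()

    # Illustrative only: report where each rank is running.
    print 'Processor %d of %d on node %s' % (myid, numprocs, processor_name)

    pypar.finalize()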