Changeset 7449
- Timestamp: Aug 28, 2009, 6:19:35 PM (15 years ago)
- Location: anuga_core/source/anuga_parallel
- Files: 1 added, 6 edited, 1 moved
Legend:
- Lines prefixed with "-" were removed in this changeset
- Lines prefixed with "+" were added
- Other lines are unchanged context; "..." marks omitted unchanged code
anuga_core/source/anuga_parallel/__init__.py
r3460 → r7449

+ #Make parallel available as a Python package
  pass
anuga_core/source/anuga_parallel/parallel_advection.py
r7447 → r7449

- """Class Parallel_Domain -
+ """Class Parallel_domain -
  2D triangular domains for finite-volume computations of
  the advection equation, with extra structures to allow
- communication between other Parallel_Domains and itself
+ communication between other Parallel_domains and itself

  This module contains a specialisation of class Domain from module advection.py
...
  """

- import logging, logging.config
- logger = logging.getLogger('parallel')
- logger.setLevel(logging.WARNING)
-
- try:
-     logging.config.fileConfig('log.ini')
- except:
-     pass
-
  from anuga.advection import *


- #from Numeric import zeros, Float, Int, ones, allclose, array
  import numpy as num

...

- class Parallel_Domain(Domain):
+ class Parallel_domain(Domain):

      def __init__(self,
anuga_core/source/anuga_parallel/parallel_api.py
r7448 → r7449

  from anuga_parallel.distribute_mesh import pmesh_divide_metis

- from anuga_parallel.parallel_shallow_water import Parallel_Domain
+ from anuga_parallel.parallel_shallow_water import Parallel_domain

  #------------------------------------------------------------------------------

...

  myid = rank()
  processor_name = get_processor_name()
- print 'I am processor %d of %d on node %s' %(myid, numprocs, processor_name)
+ #print 'I am processor %d of %d on node %s' %(myid, numprocs, processor_name)

...

      ghost_recv_dict, full_send_dict,\
      number_of_full_nodes, number_of_full_triangles =\
-         distribute_mesh(domain)
+         distribute_mesh(domain, verbose=verbose)

...

      ghost_recv_dict, full_send_dict,\
      number_of_full_nodes, number_of_full_triangles =\
-         rec_submesh(0)
+         rec_submesh(0, verbose)

...

  #------------------------------------------------------------------------

- if verbose: print 'myid ',myid, number_of_full_nodes, number_of_full_triangles
+ if verbose: print 'myid = %g, no_full_nodes = %g, no_full_triangles = %g' % (myid, number_of_full_nodes, number_of_full_triangles)


- domain = Parallel_Domain(points, vertices, boundary,
+ domain = Parallel_domain(points, vertices, boundary,
                           full_send_dict=full_send_dict,
                           ghost_recv_dict=ghost_recv_dict,

...

- def distribute_mesh(domain):
+ def distribute_mesh(domain, verbose=False):

      numprocs = size()

...

      # Subdivide the mesh
-     print 'Subdivide mesh'
+     if verbose: print 'Subdivide mesh'
      nodes, triangles, boundary, triangles_per_proc, quantities = \
          pmesh_divide_metis(domain, numprocs)

...

      # Build the mesh that should be assigned to each processor,
      # this includes ghost nodes and the communication pattern
-     print 'Build submeshes'
+     if verbose: print 'Build submeshes'
      submesh = build_submesh(nodes, triangles, boundary,\
                              quantities, triangles_per_proc)

-     for p in range(numprocs):
-         N = len(submesh['ghost_nodes'][p])
-         M = len(submesh['ghost_triangles'][p])
-         print 'There are %d ghost nodes and %d ghost triangles on proc %d'\
-               %(N, M, p)
+     if verbose:
+         for p in range(numprocs):
+             N = len(submesh['ghost_nodes'][p])
+             M = len(submesh['ghost_triangles'][p])
+             print 'There are %d ghost nodes and %d ghost triangles on proc %d'\
+                   %(N, M, p)

      # Send the mesh partition to the appropriate processor
-     print 'Distribute submeshes'
+     if verbose: print 'Distribute submeshes'
      for p in range(1, numprocs):
-         send_submesh(submesh, triangles_per_proc, p)
+         send_submesh(submesh, triangles_per_proc, p, verbose)

      # Build the local mesh for processor 0
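Taken together, the parallel_api.py changes make progress output opt-in: the verbose flag handed to distribute() is now threaded down into distribute_mesh(), send_submesh() and rec_submesh(). The following is a minimal sketch of the resulting shape of distribute_mesh(), using only routine names that appear in this changeset; the processor count is passed in as an argument here (rather than queried from MPI via size()) purely to keep the sketch self-contained, so the signature differs from the real function.

    from anuga_parallel.distribute_mesh import pmesh_divide_metis
    from anuga_parallel.distribute_mesh import build_submesh
    from anuga_parallel.distribute_mesh import send_submesh

    def distribute_mesh(domain, numprocs, verbose=False):
        # Subdivide the mesh with metis
        if verbose: print('Subdivide mesh')
        nodes, triangles, boundary, triangles_per_proc, quantities = \
            pmesh_divide_metis(domain, numprocs)

        # Build one submesh (full triangles plus a ghost layer) per processor
        if verbose: print('Build submeshes')
        submesh = build_submesh(nodes, triangles, boundary,
                                quantities, triangles_per_proc)

        # Report ghost-layer sizes only when asked to
        if verbose:
            for p in range(numprocs):
                print('Proc %d: %d ghost nodes, %d ghost triangles'
                      % (p, len(submesh['ghost_nodes'][p]),
                         len(submesh['ghost_triangles'][p])))

        # Ship each partition to its processor; processor 0 keeps its own part,
        # and the verbose flag travels with every send
        if verbose: print('Distribute submeshes')
        for p in range(1, numprocs):
            send_submesh(submesh, triangles_per_proc, p, verbose)

        return submesh, triangles_per_proc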
anuga_core/source/anuga_parallel/parallel_shallow_water.py
r7447 r7449 1 """Class Parallel_ Shallow_Water_Domain -1 """Class Parallel_shallow_water_domain - 2 2 2D triangular domains for finite-volume computations of 3 3 the shallow water equation, with extra structures to allow 4 communication between other Parallel_ Domains and itself4 communication between other Parallel_domains and itself 5 5 6 6 This module contains a specialisation of class Domain … … 12 12 """ 13 13 14 import logging, logging.config15 logger = logging.getLogger('parallel')16 logger.setLevel(logging.WARNING)17 18 try:19 logging.config.fileConfig('log.ini')20 except:21 pass22 23 24 14 from anuga.interface import Domain 25 15 … … 30 20 31 21 32 class Parallel_ Domain(Domain):22 class Parallel_domain(Domain): 33 23 34 24 def __init__(self, coordinates, vertices, … … 248 238 self.communication_time += time.time()-t0 249 239 250 251 ## This was removed due to not beening required to be redefined in parallel_shallow_water252 ## the original "write_time" is good... however might need some small edits to work properly253 ## with parallel- Nick and Ole April 2007254 ## def write_time(self):255 ## if self.min_timestep == self.max_timestep:256 ## print 'Processor %d/%d, Time = %.4f, delta t = %.8f, steps=%d (%d)'\257 ## %(self.processor, self.numproc,258 ## self.time, self.min_timestep, self.number_of_steps,259 ## self.number_of_first_order_steps)260 ## elif self.min_timestep > self.max_timestep:261 ## print 'Processor %d/%d, Time = %.4f, steps=%d (%d)'\262 ## %(self.processor, self.numproc,263 ## self.time, self.number_of_steps,264 ## self.number_of_first_order_steps)265 ## else:266 ## print 'Processor %d/%d, Time = %.4f, delta t in [%.8f, %.8f], steps=%d (%d)'\267 ## %(self.processor, self.numproc,268 ## self.time, self.min_timestep,269 ## self.max_timestep, self.number_of_steps,270 ## self.number_of_first_order_steps)271 272 273 # commented out on the 7/11/06274 # def evolve(self, yieldstep=None, finaltime=None,275 # skip_initial_step=False):276 # """Specialisation of basic evolve method from parent class277 # """278 279 #Initialise real time viz if requested280 # if self.time == 0.0:281 # pass282 283 #Call basic machinery from parent class284 # for t in Domain.evolve(self, yieldstep, finaltime, skip_initial_step):285 286 #Pass control on to outer loop for more specific actions287 # yield(t) -
anuga_core/source/anuga_parallel/run_parallel_sw_rectangle.py
r7447 → r7449

  #########################################################

- import pypar    # The Python-MPI interface
  import time

- #from Numeric import array
- # pmesh
  import numpy as num

  from print_stats import print_test_stats, build_full_flag

- from anuga.shallow_water import Domain
- from parallel_shallow_water import Parallel_Domain
+ #----------------------------
+ # Sequential interface
+ #---------------------------
+ from anuga.interface import Domain
+ from anuga.interface import Transmissive_boundary, Reflective_boundary

- # mesh partition routines
- from parallel_meshes import parallel_rectangle
+ #----------------------------
+ # Parallel interface
+ #---------------------------
+ from anuga_parallel.interface import Parallel_shallow_water_domain
+ from anuga_parallel.interface import parallel_rectangle
+ from anuga_parallel.interface import myid, numprocs, get_processor_name

  ###############################
  # Read in processor information
  ###############################
- numprocs = pypar.size()
- myid = pypar.rank()
- processor_name = pypar.get_processor_name()
+ numprocs = numprocs()
+ myid = myid()
+ processor_name = get_processor_name()

  M = 50

...

  ###########################################

- domain = Parallel_Domain(points, vertices, boundary,
-                          full_send_dict = full_send_dict,
-                          ghost_recv_dict = ghost_recv_dict)
+ domain = Parallel_shallow_water_domain(points, vertices, boundary,
+                                        full_send_dict = full_send_dict,
+                                        ghost_recv_dict = ghost_recv_dict)


  #Boundaries
- from parallel_shallow_water import Transmissive_boundary, Reflective_boundary

  T = Transmissive_boundary(domain)
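The driver script no longer talks to pypar directly: processor information and the parallel domain class now come from anuga_parallel.interface. Below is a condensed sketch of the resulting preamble, assembled from the added lines above; such a script is started under MPI, for example with mpirun -np 4 python run_parallel_sw_rectangle.py (the mpirun form used by the test scripts in this changeset, with the process count chosen here only for illustration).

    # Parallel interface imports; the domain class and mesh generator are used
    # later in the script when the rectangular mesh and domain are built
    from anuga_parallel.interface import Parallel_shallow_water_domain
    from anuga_parallel.interface import parallel_rectangle
    from anuga_parallel.interface import myid, numprocs, get_processor_name

    # MPI rank and size now come from the wrapper functions instead of pypar
    numprocs = numprocs()
    myid = myid()
    processor_name = get_processor_name()
    print('I am processor %d of %d on node %s' % (myid, numprocs, processor_name))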
anuga_core/source/anuga_parallel/test_distribute_domain.py
r7448 → r7449

  import pypar

- #from Numeric import allclose, array, zeros, Float, take, nonzero
  import numpy as num

- from anuga.pmesh.mesh_interface import create_mesh_from_regions
-
- from anuga.interface import rectangular_cross
- from anuga.abstract_2d_finite_volumes.pmesh2domain import pmesh_to_domain_instance

  from anuga.utilities.numerical_tools import ensure_numeric

...

  from anuga.interface import Transmissive_boundary

+ from anuga.interface import rectangular_cross
+ from anuga.interface import create_domain_from_file

- from anuga_parallel.parallel_api import distribute, myid, numprocs
+ from anuga_parallel.interface import distribute, myid, numprocs

...

- domain = pmesh_to_domain_instance(mesh_filename, Domain)
+ domain = create_domain_from_file(mesh_filename)
  domain.set_quantity('stage', Set_Stage(756000.0, 756500.0, 2.0))

...

  if parallel:
      if myid == 0: print 'DISTRIBUTING PARALLEL DOMAIN'
-     domain = distribute(domain, verbose=False)
+     domain = distribute(domain)

  #------------------------------------------------------------------------------

...

      l2norm[2] = pow(l2norm[2], 2)
      if myid == 0:
-         domain.write_time()
+         #domain.write_time()

          #print edges[:,1]

...

          pypar.send(linfnorm, 0)
      else:
-         domain.write_time()
+         #domain.write_time()
          l1list.append(l1norm)
          l2list.append(l2norm)

...

      return (l1list, l2list, linflist)

- # Test an 8-way run of the shallow water equations
+ # Test an nprocs-way run of the shallow water equations
  # against the sequential code.

- class Test_Parallel_Sw(unittest.TestCase):
-     def testParallelSw(self):
+ class Test_distribute_domain(unittest.TestCase):
+     def test_distribute_domain(self):
          print "Expect this test to fail if not run from the parallel directory."
-         result = os.system("mpirun -np %d python test_parallel_sw.py" % nprocs)
+         result = os.system("mpirun -np %d python test_distribute_domain.py" % nprocs)
          assert_(result == 0)

...

  if numprocs == 1:
      runner = unittest.TextTestRunner()
-     suite = unittest.makeSuite(Test_Parallel_Sw, 'test')
+     suite = unittest.makeSuite(Test_distribute_domain, 'test')
      runner.run(suite)
  else:
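The updated test also shows the intended high-level workflow: build a sequential domain from a mesh file with create_domain_from_file(), then call distribute() and carry on with the returned local domain. A stripped-down sketch follows, in which the mesh file name, stage value and time-stepping parameters are placeholders rather than values taken from the test.

    from anuga.interface import create_domain_from_file
    from anuga_parallel.interface import distribute, myid

    # 'mesh.tsh' is a placeholder; the real test reads its own mesh_filename
    domain = create_domain_from_file('mesh.tsh')
    domain.set_quantity('stage', 2.0)

    # Partition the mesh and hand each processor its local parallel domain
    domain = distribute(domain)

    # Evolve as usual; yieldstep and finaltime here are purely illustrative
    for t in domain.evolve(yieldstep=1.0, finaltime=20.0):
        if myid == 0:
            domain.write_time()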
anuga_core/source/anuga_parallel/test_distribute_mesh.py
r7448 → r7449

  from anuga.interface import rectangular_cross

- from parallel_shallow_water import Parallel_Domain
-
- from anuga_parallel.distribute_domain import pmesh_divide_metis
- from anuga_parallel.distribute_domain import build_submesh
- from anuga_parallel.distribute_domain import submesh_full, submesh_ghost, submesh_quantities
- from anuga_parallel.distribute_domain import extract_hostmesh, rec_submesh, send_submesh
+ from anuga_parallel.distribute_mesh import pmesh_divide_metis
+ from anuga_parallel.distribute_mesh import build_submesh
+ from anuga_parallel.distribute_mesh import submesh_full, submesh_ghost, submesh_quantities
+ from anuga_parallel.distribute_mesh import extract_hostmesh, rec_submesh, send_submesh

  import numpy as num

...

  assert num.allclose(full_send_dict[0],true_full_send[0])
  assert num.allclose(full_send_dict[1],true_full_send[1])

- """
- par_domain = Parallel_Domain(points, vertices, boundary,
-                              full_send_dict = full_send_dict,
-                              ghost_recv_dict = ghost_recv_dict)
- """