Changeset 7449


Timestamp: Aug 28, 2009, 6:19:35 PM
Author:    steve
Message:   Testing unit tests
Location:  anuga_core/source/anuga_parallel
Files:     1 added, 6 edited, 1 moved

Legend: lines starting with '-' were removed, lines starting with '+' were added, unprefixed lines are unmodified context, and '…' marks omitted lines between hunks.
  • anuga_core/source/anuga_parallel/__init__.py

    r3460 → r7449

    +
     #Make parallel available as a Python package
     pass
  • anuga_core/source/anuga_parallel/parallel_advection.py

    r7447 → r7449

    -"""Class Parallel_Domain -
    +"""Class Parallel_domain -
     2D triangular domains for finite-volume computations of
     the advection equation, with extra structures to allow
    -communication between other Parallel_Domains and itself
    +communication between other Parallel_domains and itself

     This module contains a specialisation of class Domain from module advection.py
    …
     """

    -import logging, logging.config
    -logger = logging.getLogger('parallel')
    -logger.setLevel(logging.WARNING)
    -
    -try:
    -    logging.config.fileConfig('log.ini')
    -except:
    -    pass
    -
     from anuga.advection import *

    -#from Numeric import zeros, Float, Int, ones, allclose, array
     import numpy as num
    …
    -class Parallel_Domain(Domain):
    +class Parallel_domain(Domain):

         def __init__(self,
  • anuga_core/source/anuga_parallel/parallel_api.py

    r7448 → r7449

         from anuga_parallel.distribute_mesh import pmesh_divide_metis

    -    from anuga_parallel.parallel_shallow_water import Parallel_Domain
    +    from anuga_parallel.parallel_shallow_water import Parallel_domain

     #------------------------------------------------------------------------------
    …
     myid = rank()
     processor_name = get_processor_name()
    -print 'I am processor %d of %d on node %s' %(myid, numprocs, processor_name)
    +#print 'I am processor %d of %d on node %s' %(myid, numprocs, processor_name)

    …
                     ghost_recv_dict, full_send_dict,\
                     number_of_full_nodes, number_of_full_triangles =\
    -                distribute_mesh(domain)
    +                distribute_mesh(domain, verbose=verbose)

    …
                     ghost_recv_dict, full_send_dict,\
                     number_of_full_nodes, number_of_full_triangles =\
    -                rec_submesh(0)
    +                rec_submesh(0, verbose)

    …
         #------------------------------------------------------------------------

    -    if verbose: print 'myid ',myid, number_of_full_nodes, number_of_full_triangles
    -
    -    domain = Parallel_Domain(points, vertices, boundary,
    +    if verbose: print 'myid = %g, no_full_nodes = %g, no_full_triangles = %g' % (myid, number_of_full_nodes, number_of_full_triangles)
    +
    +    domain = Parallel_domain(points, vertices, boundary,
                                  full_send_dict=full_send_dict,
                                  ghost_recv_dict=ghost_recv_dict,
    …

    -def distribute_mesh(domain):
    +def distribute_mesh(domain, verbose=False):

         numprocs = size()
    …
         # Subdivide the mesh
    -    print 'Subdivide mesh'
    +    if verbose: print 'Subdivide mesh'
         nodes, triangles, boundary, triangles_per_proc, quantities = \
                pmesh_divide_metis(domain, numprocs)
    …
         # Build the mesh that should be assigned to each processor,
         # this includes ghost nodes and the communication pattern
    -    print 'Build submeshes'
    +    if verbose: print 'Build submeshes'
         submesh = build_submesh(nodes, triangles, boundary,\
                                 quantities, triangles_per_proc)

    -    for p in range(numprocs):
    -        N = len(submesh['ghost_nodes'][p])
    -        M = len(submesh['ghost_triangles'][p])
    -        print 'There are %d ghost nodes and %d ghost triangles on proc %d'\
    -              %(N, M, p)
    +    if verbose:
    +        for p in range(numprocs):
    +            N = len(submesh['ghost_nodes'][p])
    +            M = len(submesh['ghost_triangles'][p])
    +            print 'There are %d ghost nodes and %d ghost triangles on proc %d'\
    +                  %(N, M, p)

         # Send the mesh partition to the appropriate processor
    -    print 'Distribute submeshes'
    +    if verbose: print 'Distribute submeshes'
         for p in range(1, numprocs):
    -      send_submesh(submesh, triangles_per_proc, p)
    +      send_submesh(submesh, triangles_per_proc, p, verbose)

         # Build the local mesh for processor 0
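
    The hunks above thread a new verbose flag from the public distribute() entry point down through distribute_mesh(), send_submesh() and rec_submesh(), so the partitioning messages are only printed on request. A minimal driver sketch of how that flag would be exercised, assuming a small rectangular_cross mesh and an illustrative 'stage' value (only distribute(), myid and numprocs are taken from this changeset; the rest follows the pattern in test_distribute_domain.py below):

        # Hedged sketch (Python 2, as in the surrounding code); mesh size and
        # quantity value are illustrative assumptions.
        from anuga.interface import Domain, rectangular_cross
        from anuga_parallel.interface import distribute, myid, numprocs

        # Every processor builds the same small sequential domain.
        points, vertices, boundary = rectangular_cross(10, 10)
        domain = Domain(points, vertices, boundary)
        domain.set_quantity('stage', 1.0)

        # distribute() partitions the mesh on processor 0, sends the submeshes
        # to the other processors and returns a Parallel_domain everywhere.
        # With verbose=True the 'Subdivide mesh', 'Build submeshes' and
        # 'Distribute submeshes' messages guarded above are printed.
        domain = distribute(domain, verbose=True)

        if myid == 0:
            print 'Distributed over %d processors' % numprocs
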
  • anuga_core/source/anuga_parallel/parallel_shallow_water.py

    r7447 → r7449

    -"""Class Parallel_Shallow_Water_Domain -
    +"""Class Parallel_shallow_water_domain -
     2D triangular domains for finite-volume computations of
     the shallow water equation, with extra structures to allow
    -communication between other Parallel_Domains and itself
    +communication between other Parallel_domains and itself

     This module contains a specialisation of class Domain
    …
     """

    -import logging, logging.config
    -logger = logging.getLogger('parallel')
    -logger.setLevel(logging.WARNING)
    -
    -try:
    -    logging.config.fileConfig('log.ini')
    -except:
    -    pass
    -
    -
     from anuga.interface import Domain
    …
    -class Parallel_Domain(Domain):
    +class Parallel_domain(Domain):

         def __init__(self, coordinates, vertices,
    …
             self.communication_time += time.time()-t0

    -
    -## This was removed due to not beening required to be redefined in parallel_shallow_water
    -## the original "write_time" is good... however might need some small edits to work properly
    -## with parallel- Nick and Ole April 2007
    -##     def write_time(self):
    -##         if self.min_timestep == self.max_timestep:
    -##             print 'Processor %d/%d, Time = %.4f, delta t = %.8f, steps=%d (%d)'\
    -##                   %(self.processor, self.numproc,
    -##                     self.time, self.min_timestep, self.number_of_steps,
    -##                     self.number_of_first_order_steps)
    -##         elif self.min_timestep > self.max_timestep:
    -##             print 'Processor %d/%d, Time = %.4f, steps=%d (%d)'\
    -##                   %(self.processor, self.numproc,
    -##                     self.time, self.number_of_steps,
    -##                     self.number_of_first_order_steps)
    -##         else:
    -##             print 'Processor %d/%d, Time = %.4f, delta t in [%.8f, %.8f], steps=%d (%d)'\
    -##                   %(self.processor, self.numproc,
    -##                     self.time, self.min_timestep,
    -##                     self.max_timestep, self.number_of_steps,
    -##                     self.number_of_first_order_steps)
    -
    -
    -# commented out on the 7/11/06
    -#    def evolve(self, yieldstep=None, finaltime=None,
    -#               skip_initial_step=False):
    -#        """Specialisation of basic evolve method from parent class
    -#        """
    -
    -        #Initialise real time viz if requested
    -#        if self.time == 0.0:
    -#            pass
    -
    -        #Call basic machinery from parent class
    -#        for t in Domain.evolve(self, yieldstep, finaltime, skip_initial_step):
    -
    -            #Pass control on to outer loop for more specific actions
    -#            yield(t)
  • anuga_core/source/anuga_parallel/run_parallel_sw_rectangle.py

    r7447 → r7449

     #########################################################

    -import pypar    # The Python-MPI interface
     import time

    -#from Numeric import array
    -# pmesh
     import numpy as num

     from print_stats import print_test_stats, build_full_flag

    -from anuga.shallow_water import Domain
    -from parallel_shallow_water import Parallel_Domain
    +#----------------------------
    +# Sequential interface
    +#---------------------------
    +from anuga.interface import Domain
    +from anuga.interface import Transmissive_boundary, Reflective_boundary

    -
    -# mesh partition routines
    -from parallel_meshes import parallel_rectangle
    +#----------------------------
    +# Parallel interface
    +#---------------------------
    +from anuga_parallel.interface import Parallel_shallow_water_domain
    +from anuga_parallel.interface import parallel_rectangle
    +from anuga_parallel.interface import myid, numprocs, get_processor_name

     ###############################
     # Read in processor information
     ###############################
    -numprocs = pypar.size()
    -myid = pypar.rank()
    -processor_name = pypar.get_processor_name()
    +numprocs = numprocs()
    +myid     = myid()
    +processor_name = get_processor_name()

     M = 50
    …
     ###########################################

    -domain = Parallel_Domain(points, vertices, boundary,
    -                         full_send_dict  = full_send_dict,
    -                         ghost_recv_dict = ghost_recv_dict)
    +domain = Parallel_shallow_water_domain(points, vertices, boundary,
    +                                       full_send_dict  = full_send_dict,
    +                                       ghost_recv_dict = ghost_recv_dict)


     #Boundaries
    -from parallel_shallow_water import Transmissive_boundary, Reflective_boundary
    +

     T = Transmissive_boundary(domain)
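
    The hunks above move this driver from direct pypar calls to the anuga_parallel.interface wrappers (myid, numprocs, get_processor_name, parallel_rectangle, Parallel_shallow_water_domain). For completeness, such a script would then attach boundaries and run the usual evolve loop; a brief continuation sketch, in which the boundary tag names, yieldstep and finaltime are illustrative assumptions rather than part of the changeset:

        # Hedged continuation sketch (Python 2): attach the boundaries built from
        # the imports above and evolve the parallel domain on every processor.
        R = Reflective_boundary(domain)
        domain.set_boundary({'left': R, 'right': T, 'top': R, 'bottom': R,
                             'ghost': None})    # tag names assumed; 'ghost' marks
                                                # the halo edge of each submesh

        for t in domain.evolve(yieldstep=0.1, finaltime=1.0):
            if myid == 0:
                domain.write_time()             # only processor 0 reports progress
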
  • anuga_core/source/anuga_parallel/test_distribute_domain.py

    r7448 → r7449

     import pypar

    -#from Numeric import allclose, array, zeros, Float, take, nonzero
    -
     import numpy as num

    -from anuga.pmesh.mesh_interface import create_mesh_from_regions
    -
    -from anuga.interface import rectangular_cross
    -from anuga.abstract_2d_finite_volumes.pmesh2domain import pmesh_to_domain_instance

     from anuga.utilities.numerical_tools import ensure_numeric
    …
     from anuga.interface import Transmissive_boundary

    -
    -from anuga_parallel.parallel_api import distribute, myid, numprocs
    +from anuga.interface import rectangular_cross
    +from anuga.interface import create_domain_from_file
    +
    +from anuga_parallel.interface import distribute, myid, numprocs

    …
    -    domain = pmesh_to_domain_instance(mesh_filename, Domain)
    +    domain = create_domain_from_file(mesh_filename)
         domain.set_quantity('stage', Set_Stage(756000.0, 756500.0, 2.0))

    …
         if parallel:
             if myid == 0: print 'DISTRIBUTING PARALLEL DOMAIN'
    -        domain = distribute(domain, verbose=False)
    +        domain = distribute(domain)

         #------------------------------------------------------------------------------
    …
                 l2norm[2] = pow(l2norm[2], 2)
                 if myid == 0:
    -                domain.write_time()
    +                #domain.write_time()

                     #print edges[:,1]
    …
                     pypar.send(linfnorm, 0)
             else:
    -            domain.write_time()
    +            #domain.write_time()
                 l1list.append(l1norm)
                 l2list.append(l2norm)
    …
         return (l1list, l2list, linflist)

    -# Test an 8-way run of the shallow water equations
    +# Test an nprocs-way run of the shallow water equations
     # against the sequential code.

    -class Test_Parallel_Sw(unittest.TestCase):
    -    def testParallelSw(self):
    +class Test_distribute_domain(unittest.TestCase):
    +    def test_distribute_domain(self):
             print "Expect this test to fail if not run from the parallel directory."
    -        result = os.system("mpirun -np %d python test_parallel_sw.py" % nprocs)
    +        result = os.system("mpirun -np %d python test_distribute_domain.py" % nprocs)
             assert_(result == 0)

    …
         if numprocs == 1:
             runner = unittest.TextTestRunner()
    -        suite = unittest.makeSuite(Test_Parallel_Sw, 'test')
    +        suite = unittest.makeSuite(Test_distribute_domain, 'test')
             runner.run(suite)
         else:
  • anuga_core/source/anuga_parallel/test_distribute_mesh.py

    r7448 → r7449

     from anuga.interface import rectangular_cross

    -from parallel_shallow_water import Parallel_Domain
    -
    -from anuga_parallel.distribute_domain import pmesh_divide_metis
    -from anuga_parallel.distribute_domain import build_submesh
    -from anuga_parallel.distribute_domain import submesh_full, submesh_ghost, submesh_quantities
    -from anuga_parallel.distribute_domain import extract_hostmesh, rec_submesh, send_submesh
    +from anuga_parallel.distribute_mesh import pmesh_divide_metis
    +from anuga_parallel.distribute_mesh import build_submesh
    +from anuga_parallel.distribute_mesh import submesh_full, submesh_ghost, submesh_quantities
    +from anuga_parallel.distribute_mesh import extract_hostmesh, rec_submesh, send_submesh

     import numpy as num
    …
                     assert num.allclose(full_send_dict[0],true_full_send[0])
                     assert num.allclose(full_send_dict[1],true_full_send[1])
    -
    -                """
    -        par_domain = Parallel_Domain(points, vertices, boundary,
    -                                     full_send_dict  = full_send_dict,
    -                                     ghost_recv_dict = ghost_recv_dict)
    -                                     """