source: anuga_core/source/anuga_parallel/parallel_api.py @ 3622

Last change on this file since 3622 was 3612, checked in by ole, 17 years ago

Parallel API now transfers all quantities, boundary conditions and some domain attributes in addition to the meshes.

Parallelisation virtually down to one line of code!
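The change note above says parallelisation is now virtually one line of code. Below is a minimal sketch of a driver script calling distribute(); only distribute() and the module-level myid come from parallel_api.py, while the script name, the mesh factory import and the Domain/Dirichlet_boundary imports are assumptions drawn from typical sequential ANUGA examples and may not match this exact revision.

# run_parallel_example.py -- hypothetical driver, not part of this file.
# The sequential ANUGA imports below are assumptions based on typical examples.
from anuga.abstract_2d_finite_volumes.mesh_factory import rectangular_cross
from anuga.shallow_water import Domain, Dirichlet_boundary

# Importing parallel_api initialises pypar/MPI and defines myid and numprocs.
from anuga_parallel.parallel_api import distribute, myid

# Build the sequential domain. Every process runs the same script, although
# only processor 0's copy is actually used by distribute() in this revision.
points, vertices, boundary = rectangular_cross(10, 10)
domain = Domain(points, vertices, boundary)
domain.set_name('parallel_example')
domain.set_quantity('stage', 0.5)
Bd = Dirichlet_boundary([0.5, 0.0, 0.0])
domain.set_boundary({'left': Bd, 'right': Bd, 'top': Bd, 'bottom': Bd})

# The 'one line': partition the mesh, transfer quantities, the boundary map
# and domain attributes, and return this processor's Parallel_Domain.
domain = distribute(domain)

# Each process now evolves its own subdomain.
for t in domain.evolve(yieldstep=0.1, finaltime=1.0):
    if myid == 0: domain.write_time()

Because pypar initialises MPI at import time, such a script has to be launched under an MPI launcher, e.g. mpirun -np 4 python run_parallel_example.py, so that pypar.size() and pypar.rank() report the intended number of processes.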

1"""Trying to lump parallel stuff into simpler interface
2
3
4"""

# Parallelism
import pypar   # The Python-MPI interface
from anuga_parallel.pmesh_divide  import pmesh_divide_metis
from anuga_parallel.build_submesh import build_submesh
from anuga_parallel.build_local   import build_local_mesh
from anuga_parallel.build_commun  import send_submesh, rec_submesh, extract_hostmesh
from anuga_parallel.parallel_shallow_water import Parallel_Domain


#------------------------------------------------------------------------------
# Read in processor information
#------------------------------------------------------------------------------

numprocs = pypar.size()
myid = pypar.rank()
processor_name = pypar.Get_processor_name()
print 'I am processor %d of %d on node %s' %(myid, numprocs, processor_name)


def distribute(domain, verbose=False):
    """Distribute the domain on processor 0 to all processes and
    return the local Parallel_Domain on each.
    """

    # Note: for reasons not yet understood, this communication must happen
    # prior to the more complex mesh distribution below.
    if myid == 0:
        domain_name = domain.get_name()
        domain_dir = domain.get_datadir()
        # FIXME - what other attributes need to be transferred?

        for p in range(1, numprocs):
            pypar.send((domain_name, domain_dir), p)
    else:
        if verbose: print 'P%d: Receiving domain attributes' %(myid)

        domain_name, domain_dir = pypar.receive(0)


    # Distribute boundary conditions
    if myid == 0:
        boundary_map = domain.boundary_map
        for p in range(1, numprocs):
            pypar.send(boundary_map, p)
    else:
        if verbose: print 'P%d: Receiving boundary map' %(myid)

        boundary_map = pypar.receive(0)


    if myid == 0:
        # Partition and distribute the mesh.
        # The structures returned are in the correct form
        # for the ANUGA data structure.
        points, vertices, boundary, quantities, \
            ghost_recv_dict, full_send_dict = distribute_mesh(domain)

        if verbose: print 'Communication done'

    else:
        # Read in the mesh partition that belongs to this processor
        if verbose: print 'P%d: Receiving submeshes' %(myid)
        points, vertices, boundary, quantities, \
            ghost_recv_dict, full_send_dict = rec_submesh(0)


    #------------------------------------------------------------------------
    # Build the domain for this processor using partition structures
    #------------------------------------------------------------------------
    domain = Parallel_Domain(points, vertices, boundary,
                             full_send_dict  = full_send_dict,
                             ghost_recv_dict = ghost_recv_dict)

    #------------------------------------------------------------------------
    # Transfer initial conditions to each subdomain
    #------------------------------------------------------------------------
    for q in quantities:
        domain.set_quantity(q, quantities[q])

    #------------------------------------------------------------------------
    # Transfer boundary conditions to each subdomain
    #------------------------------------------------------------------------
    boundary_map['ghost'] = None  # Add binding to ghost boundary
    domain.set_boundary(boundary_map)

    #------------------------------------------------------------------------
    # Transfer other attributes to each subdomain
    #------------------------------------------------------------------------
    domain.set_name(domain_name)
    domain.set_datadir(domain_dir)

    #------------------------------------------------------------------------
    # Return parallel domain to all nodes
    #------------------------------------------------------------------------
    return domain


def distribute_mesh(domain):
    """Partition the mesh on processor 0 and send each submesh to its
    processor. Return the local mesh structures for processor 0.
    """

    numprocs = pypar.size()

    # Subdivide the mesh
    print 'Subdivide mesh'
    nodes, triangles, boundary, triangles_per_proc, quantities = \
           pmesh_divide_metis(domain, numprocs)

    # Build the mesh that should be assigned to each processor;
    # this includes ghost nodes and the communication pattern
    print 'Build submeshes'
    submesh = build_submesh(nodes, triangles, boundary,
                            quantities, triangles_per_proc)

    # Send the mesh partition to the appropriate processor
    print 'Distribute submeshes'
    for p in range(1, numprocs):
        send_submesh(submesh, triangles_per_proc, p)

    # Build the local mesh for processor 0
    points, vertices, boundary, quantities, ghost_recv_dict, full_send_dict = \
              extract_hostmesh(submesh, triangles_per_proc)

    # Return structures necessary for building the parallel domain
    return points, vertices, boundary, quantities, \
           ghost_recv_dict, full_send_dict