source: branches/numpy/anuga_parallel/parallel_api.py @ 6732

Last change on this file since 6732 was 5847, checked in by steve, 16 years ago

Changed parallel_api so that global mesh only needs to
be constructed on processor 0

File size: 7.1 KB
1"""Trying to lump parallel stuff into simpler interface
2
3
4"""

# The abstract Python-MPI interface
from anuga_parallel.parallel_abstraction import size, rank, get_processor_name
from anuga_parallel.parallel_abstraction import finalize, send, receive
from anuga_parallel.parallel_abstraction import pypar_available, barrier


# ANUGA parallel engine (only load if pypar is available)
if pypar_available:
    from anuga_parallel.build_commun  import send_submesh
    from anuga_parallel.build_commun  import rec_submesh
    from anuga_parallel.build_commun  import extract_hostmesh
    from anuga_parallel.parallel_shallow_water import Parallel_Domain

    # Mesh partitioning using Metis
    from anuga_parallel.build_submesh import build_submesh
    from anuga_parallel.pmesh_divide  import pmesh_divide_metis

#------------------------------------------------------------------------------
# Read in processor information
#------------------------------------------------------------------------------

numprocs = size()
myid = rank()
processor_name = get_processor_name()
print 'I am processor %d of %d on node %s' %(myid, numprocs, processor_name)

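# Launching is outside the scope of this module, but a typical MPI run
# (assuming pypar is installed and a driver script uses distribute() as
# sketched above) would look like:
#
#     mpirun -np 4 python run_parallel_model.py
#
# where run_parallel_model.py is a hypothetical script name.
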
def distribute(domain, verbose=False):
    """ Distribute the domain to all processes
    """

    # FIXME: Dummy assignment (needed until boundaries are refactored
    # to stay independent of domains until they are applied)
    if myid == 0:
        bdmap = {}
        for tag in domain.get_boundary_tags():
            bdmap[tag] = None

        domain.set_boundary(bdmap)

    if not pypar_available: return domain # Bypass

    # For some obscure reason this communication must happen prior to
    # the more complex mesh distribution - Oh Well!
    if myid == 0:
        domain_name = domain.get_name()
        domain_dir = domain.get_datadir()
        georef = domain.geo_reference

        # FIXME - what other attributes need to be transferred?

        for p in range(1, numprocs):
            send((domain_name, domain_dir, georef), p)
    else:
        if verbose: print 'P%d: Receiving domain attributes' %(myid)

        domain_name, domain_dir, georef = receive(0)

    # Distribute boundary conditions
    # FIXME: This cannot handle e.g. Time_boundaries due to
    # difficulties pickling functions
    if myid == 0:
        boundary_map = domain.boundary_map
        for p in range(1, numprocs):
            send(boundary_map, p)
    else:
        if verbose: print 'P%d: Receiving boundary map' %(myid)

        boundary_map = receive(0)

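    # (Illustration, not executed: a boundary such as
    #  Time_boundary(domain, lambda t: [t, 0, 0]) wraps a Python function,
    #  and the pickling used by send() cannot serialise function objects,
    #  hence the FIXME above.  The exact constructor signature shown is
    #  from memory and may differ.)
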
    if myid == 0:
        # Partition and distribute the mesh.
        # The structures returned are in the
        # correct form for the ANUGA data structure

        points, vertices, boundary, quantities,\
                ghost_recv_dict, full_send_dict,\
                number_of_full_nodes, number_of_full_triangles =\
                distribute_mesh(domain)

        if verbose: print 'Communication done'

    else:
        # Read in the mesh partition that belongs to this
        # processor
        if verbose: print 'P%d: Receiving submeshes' %(myid)
        points, vertices, boundary, quantities,\
                ghost_recv_dict, full_send_dict,\
                number_of_full_nodes, number_of_full_triangles =\
                rec_submesh(0)

    #------------------------------------------------------------------------
    # Build the domain for this processor using partition structures
    #------------------------------------------------------------------------

    domain = Parallel_Domain(points, vertices, boundary,
                             full_send_dict=full_send_dict,
                             ghost_recv_dict=ghost_recv_dict,
                             number_of_full_nodes=number_of_full_nodes,
                             number_of_full_triangles=number_of_full_triangles)

    #------------------------------------------------------------------------
    # Transfer initial conditions to each subdomain
    #------------------------------------------------------------------------
    for q in quantities:
        domain.set_quantity(q, quantities[q])

    #------------------------------------------------------------------------
    # Transfer boundary conditions to each subdomain
    #------------------------------------------------------------------------
    boundary_map['ghost'] = None  # Add binding to ghost boundary
    domain.set_boundary(boundary_map)
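    # (Note: binding the 'ghost' tag to None appears to act as a
    #  placeholder: ghost values are kept up to date by inter-process
    #  communication, so no physical boundary condition is needed on
    #  ghost edges.  This reading is inferred from the code above, not
    #  from any documentation.)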

    #------------------------------------------------------------------------
    # Transfer other attributes to each subdomain
    #------------------------------------------------------------------------
    domain.set_name(domain_name)
    domain.set_datadir(domain_dir)
    domain.geo_reference = georef

    #------------------------------------------------------------------------
    # Return parallel domain to all nodes
    #------------------------------------------------------------------------
    return domain


def distribute_mesh(domain):

    numprocs = size()

    # Subdivide the mesh
    print 'Subdivide mesh'
    nodes, triangles, boundary, triangles_per_proc, quantities = \
           pmesh_divide_metis(domain, numprocs)
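    # (As the names suggest, pmesh_divide_metis returns the partitioned
    #  mesh arrays plus triangles_per_proc, the number of triangles
    #  assigned to each processor.  This description is inferred from the
    #  call sites here, not from that function's own documentation.)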

    # Build the mesh that should be assigned to each processor;
    # this includes ghost nodes and the communication pattern
    print 'Build submeshes'
    submesh = build_submesh(nodes, triangles, boundary,\
                            quantities, triangles_per_proc)

    for p in range(numprocs):
        N = len(submesh['ghost_nodes'][p])
        M = len(submesh['ghost_triangles'][p])
        print 'There are %d ghost nodes and %d ghost triangles on proc %d'\
              %(N, M, p)

    # Send the mesh partition to the appropriate processor
    print 'Distribute submeshes'
    for p in range(1, numprocs):
        send_submesh(submesh, triangles_per_proc, p)

    # Build the local mesh for processor 0
    points, vertices, boundary, quantities, ghost_recv_dict, full_send_dict =\
              extract_hostmesh(submesh, triangles_per_proc)

    # Keep track of the number of full nodes and triangles.
    # This is useful later if one needs access to a ghost-free domain.
    # Here, we do it for process 0. The others are done in rec_submesh.
    number_of_full_nodes = len(submesh['full_nodes'][0])
    number_of_full_triangles = len(submesh['full_triangles'][0])

    # Return structures necessary for building the parallel domain
    return points, vertices, boundary, quantities,\
           ghost_recv_dict, full_send_dict,\
           number_of_full_nodes, number_of_full_triangles