source: trunk/anuga_core/source/anuga_parallel/parallel_api.py @ 8272

Last change on this file since 8272 was 8272, checked in by steve, 11 years ago

added sww_merge to parallel_domain functions

File size: 8.1 KB
Line 
1"""Trying to lump parallel stuff into simpler interface
2
3
4"""
5
6
7
8# The abstract Python-MPI interface
9from anuga_parallel.parallel_abstraction import size, rank, get_processor_name
10from anuga_parallel.parallel_abstraction import finalize, send, receive
11from anuga_parallel.parallel_abstraction import pypar_available, barrier
12
13
14# ANUGA parallel engine (only load if pypar can)
15if pypar_available:
16    from anuga_parallel.distribute_mesh  import send_submesh
17    from anuga_parallel.distribute_mesh  import rec_submesh
18    from anuga_parallel.distribute_mesh  import extract_hostmesh
19
20    # Mesh partitioning using Metis
21    from anuga_parallel.distribute_mesh import build_submesh
22    from anuga_parallel.distribute_mesh import pmesh_divide_metis_with_map
23
24    from anuga_parallel.parallel_shallow_water import Parallel_domain
25
#------------------------------------------------------------------------------
# Read in processor information
#------------------------------------------------------------------------------

# Cached once at import time from the pypar abstraction layer:
# total number of MPI processes, this process' rank, and the host name.
# When pypar is unavailable these fall back to the serial defaults
# provided by parallel_abstraction (effectively numprocs == 1, myid == 0).
numprocs = size()
myid = rank()
processor_name = get_processor_name()
#print 'I am processor %d of %d on node %s' %(myid, numprocs, processor_name)
34
35
36
37
def distribute(domain, verbose=False):
    """ Distribute the domain to all processes

    The complete sequential domain is expected on processor 0 only;
    every processor (including 0) returns a Parallel_domain holding its
    own partition of the mesh, with quantities, the boundary map and the
    name/datadir/geo_reference attributes transferred from the original.

    domain  -- sequential domain (significant on processor 0 only)
    verbose -- if True, print progress messages on each processor

    NOTE(review): the ordering of the communication phases below is
    significant; do not reorder the send/receive pairs.
    """

    barrier()

    # FIXME: Dummy assignment (until boundaries are refactored to
    # be independent of domains until they are applied)
    if myid == 0:
        bdmap = {}
        for tag in domain.get_boundary_tags():
            bdmap[tag] = None


        domain.set_boundary(bdmap)




    if not pypar_available: return domain # Bypass: serial run

    # For some obscure reason this communication must happen prior to
    # the more complex mesh distribution - Oh Well!
    if myid == 0:
        domain_name = domain.get_name()
        domain_dir = domain.get_datadir()
        georef = domain.geo_reference

        # FIXME - what other attributes need to be transferred?

        for p in range(1, numprocs):
            send((domain_name, domain_dir, georef), p)
    else:
        if verbose: print 'P%d: Receiving domain attributes' %(myid)

        domain_name, domain_dir, georef = receive(0)



    # Distribute boundary conditions
    # FIXME: This cannot handle e.g. Time_boundaries due to
    # difficulties pickling functions
    if myid == 0:
        boundary_map = domain.boundary_map
        for p in range(1, numprocs):
            send(boundary_map, p)
    else:
        if verbose: print 'P%d: Receiving boundary map' %(myid)

        boundary_map = receive(0)




    if myid == 0:
        # Partition and distribute the mesh.
        # The structures returned are in the
        # correct form for the ANUGA data structure


        points, vertices, boundary, quantities,\
                ghost_recv_dict, full_send_dict,\
                number_of_full_nodes, number_of_full_triangles,\
                s2p_map, p2s_map =\
                distribute_mesh(domain, verbose=verbose)

        # Send serial-to-parallel (s2p) and parallel-to-serial (p2s)
        # triangle mappings to processors 1 .. numprocs-1
        for p in range(1, numprocs):
            send(s2p_map, p)
            send(p2s_map, p)

        if verbose: print 'Communication done'

    else:
        # Read in the mesh partition that belongs to this
        # processor (sent by processor 0 via send_submesh)
        if verbose: print 'P%d: Receiving submeshes' %(myid)
        points, vertices, boundary, quantities,\
                ghost_recv_dict, full_send_dict,\
                number_of_full_nodes, number_of_full_triangles =\
                rec_submesh(0, verbose)

        # Receive serial-to-parallel (s2p) and parallel-to-serial (p2s)
        # triangle mappings
        s2p_map = receive(0)
        p2s_map = receive(0)


    #------------------------------------------------------------------------
    # Build the domain for this processor using partition structures
    #------------------------------------------------------------------------

    if verbose: print 'myid = %g, no_full_nodes = %g, no_full_triangles = %g' % (myid, number_of_full_nodes, number_of_full_triangles)


    domain = Parallel_domain(points, vertices, boundary,
                             full_send_dict=full_send_dict,
                             ghost_recv_dict=ghost_recv_dict,
                             number_of_full_nodes=number_of_full_nodes,
                             number_of_full_triangles=number_of_full_triangles,
                             geo_reference=georef,
                             tri_map = s2p_map,
                             inv_tri_map = p2s_map) ## jj added this

    #------------------------------------------------------------------------
    # Transfer initial conditions to each subdomain
    #------------------------------------------------------------------------
    for q in quantities:
        domain.set_quantity(q, quantities[q])


    #------------------------------------------------------------------------
    # Transfer boundary conditions to each subdomain
    # (tagged segments facing a neighbouring partition get the 'ghost' tag)
    #------------------------------------------------------------------------
    boundary_map['ghost'] = None  # Add binding to ghost boundary
    domain.set_boundary(boundary_map)


    #------------------------------------------------------------------------
    # Transfer other attributes to each subdomain
    #------------------------------------------------------------------------
    domain.set_name(domain_name)
    domain.set_datadir(domain_dir)
    domain.geo_reference = georef

    #------------------------------------------------------------------------
    # Return parallel domain to all nodes
    #------------------------------------------------------------------------
    return domain
166
167
168
169
170
171
def distribute_mesh(domain, verbose=False):
    """ Partition the mesh of domain and send each piece to its processor.

    Called on processor 0 only (from distribute).  Subdivides the mesh
    with Metis, builds one submesh (full plus ghost entities and the
    communication pattern) per processor, ships submeshes 1..numprocs-1
    via send_submesh, and extracts processor 0's own submesh locally.

    Returns the tuple
        (points, vertices, boundary, quantities,
         ghost_recv_dict, full_send_dict,
         number_of_full_nodes, number_of_full_triangles,
         s2p_map, p2s_map)
    describing processor 0's partition; the triangle maps relate the
    serial and parallel domains.
    """

    numprocs = size()


    # Subdivide the mesh
    if verbose: print 'Subdivide mesh'
    nodes, triangles, boundary, triangles_per_proc, quantities, s2p_map, p2s_map = \
           pmesh_divide_metis_with_map(domain, numprocs)

    #PETE: s2p_map (maps serial domain triangles to parallel domain triangles)
    #      p2s_map (maps parallel domain triangles to serial domain triangles)


    # Build the mesh that should be assigned to each processor,
    # this includes ghost nodes and the communication pattern
    if verbose: print 'Build submeshes'
    submesh = build_submesh(nodes, triangles, boundary,\
                            quantities, triangles_per_proc)

    if verbose:
        for p in range(numprocs):
            N = len(submesh['ghost_nodes'][p])
            M = len(submesh['ghost_triangles'][p])
            print 'There are %d ghost nodes and %d ghost triangles on proc %d'\
                  %(N, M, p)


    # Send the mesh partition to the appropriate processor
    # (the matching receive is rec_submesh, called in distribute)
    if verbose: print 'Distribute submeshes'
    for p in range(1, numprocs):
      send_submesh(submesh, triangles_per_proc, p, verbose)

    # Build the local mesh for processor 0
    points, vertices, boundary, quantities, ghost_recv_dict, full_send_dict =\
              extract_hostmesh(submesh, triangles_per_proc)

    # Keep track of the number of full nodes and triangles.
    # This is useful later if one needs access to a ghost-free domain
    # Here, we do it for process 0. The others are done in rec_submesh.
    number_of_full_nodes = len(submesh['full_nodes'][0])
    number_of_full_triangles = len(submesh['full_triangles'][0])

    # NOTE(review): a commented-out debug dump of the submesh structures
    # (full/ghost nodes and triangles, send/receive dicts per processor)
    # used to live here; print submesh['...'][p] to inspect them.


    # Return structures necessary for building the parallel domain
    return points, vertices, boundary, quantities,\
           ghost_recv_dict, full_send_dict,\
           number_of_full_nodes, number_of_full_triangles, s2p_map, p2s_map
244
245
246
Note: See TracBrowser for help on using the repository browser.