source: trunk/anuga_core/source/anuga_parallel/parallel_api.py @ 8281

Last change on this file since 8281 was 8281, checked in by steve, 11 years ago

added in some parallel data to allow sww_merge to create a merged sww file

File size: 9.2 KB
1"""Trying to lump parallel stuff into simpler interface
2
3
4"""
5
6
7
# The abstract Python-MPI interface
from anuga_parallel.parallel_abstraction import size, rank, get_processor_name
from anuga_parallel.parallel_abstraction import finalize, send, receive
from anuga_parallel.parallel_abstraction import pypar_available, barrier


# ANUGA parallel engine (only load if pypar is available)
if pypar_available:
    from anuga_parallel.distribute_mesh import send_submesh
    from anuga_parallel.distribute_mesh import rec_submesh
    from anuga_parallel.distribute_mesh import extract_hostmesh

    # Mesh partitioning using Metis
    from anuga_parallel.distribute_mesh import build_submesh
    from anuga_parallel.distribute_mesh import pmesh_divide_metis_with_map

    from anuga_parallel.parallel_shallow_water import Parallel_domain

#------------------------------------------------------------------------------
# Read in processor information
#------------------------------------------------------------------------------

numprocs = size()
myid = rank()
processor_name = get_processor_name()
#print 'I am processor %d of %d on node %s' %(myid, numprocs, processor_name)


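# Typical usage from a run script (illustrative sketch only; building the
# sequential domain and choosing boundaries happens outside this module):
#
#     domain = ...                   # sequential domain, built on processor 0
#     domain = distribute(domain)    # swap in this processor's Parallel_domain
#     ...                            # set boundaries, evolve, etc. as usual
#     finalize()

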
def distribute(domain, verbose=False):
    """Distribute the domain across all MPI processes.

    The sequential domain built on processor 0 is partitioned and each
    process returns the Parallel_domain holding its own submesh.
    """

    barrier()

    # FIXME: Dummy assignment (until boundaries are refactored so that
    # they remain independent of domains until they are applied)
    if myid == 0:
        bdmap = {}
        for tag in domain.get_boundary_tags():
            bdmap[tag] = None

        domain.set_boundary(bdmap)

    if not pypar_available: return domain  # Bypass

    # For some obscure reason this communication must happen prior to
    # the more complex mesh distribution - Oh Well!
    if myid == 0:
        domain_name = domain.get_name()
        domain_dir = domain.get_datadir()
        georef = domain.geo_reference

        # FIXME - what other attributes need to be transferred?

        for p in range(1, numprocs):
            send((domain_name, domain_dir, georef), p)
    else:
        if verbose: print 'P%d: Receiving domain attributes' %(myid)

        domain_name, domain_dir, georef = receive(0)


    # Distribute boundary conditions
    # FIXME: This cannot handle e.g. Time_boundaries due to
    # difficulties pickling functions
    if myid == 0:
        boundary_map = domain.boundary_map
        for p in range(1, numprocs):
            send(boundary_map, p)
    else:
        if verbose: print 'P%d: Receiving boundary map' %(myid)

        boundary_map = receive(0)


    if myid == 0:
        # Partition and distribute the mesh.
        # The structures returned are in the correct
        # form for the ANUGA data structures.

        points, vertices, boundary, quantities,\
                ghost_recv_dict, full_send_dict,\
                number_of_full_nodes, number_of_full_triangles,\
                s2p_map, p2s_map, tri_map, node_map =\
                distribute_mesh(domain, verbose=verbose)

        number_of_global_triangles = len(tri_map)
        number_of_global_nodes = len(node_map)

        # Extract l2g maps
        tri_l2g  = extract_l2g_map(tri_map)
        node_l2g = extract_l2g_map(node_map)

        # Send serial-to-parallel (s2p) and parallel-to-serial (p2s)
        # triangle mappings to processors 1 .. numprocs-1
        for p in range(1, numprocs):
            send(s2p_map, p)
            send(p2s_map, p)

        if verbose: print 'Communication done'

    else:
        # Read in the mesh partition that belongs to this processor
        if verbose: print 'P%d: Receiving submeshes' %(myid)
        points, vertices, boundary, quantities,\
                ghost_recv_dict, full_send_dict,\
                number_of_full_nodes, number_of_full_triangles, \
                tri_map, node_map =\
                rec_submesh(0, verbose)

        number_of_global_triangles = len(tri_map)
        number_of_global_nodes = len(node_map)

        # Extract l2g maps
        tri_l2g  = extract_l2g_map(tri_map)
        node_l2g = extract_l2g_map(node_map)

        # Receive serial-to-parallel (s2p) and parallel-to-serial (p2s)
        # triangle mappings
        s2p_map = receive(0)
        p2s_map = receive(0)


    #------------------------------------------------------------------------
    # Build the domain for this processor using partition structures
    #------------------------------------------------------------------------

    if verbose: print 'myid = %g, no_full_nodes = %g, no_full_triangles = %g' % (myid, number_of_full_nodes, number_of_full_triangles)


    domain = Parallel_domain(points, vertices, boundary,
                             full_send_dict=full_send_dict,
                             ghost_recv_dict=ghost_recv_dict,
                             number_of_full_nodes=number_of_full_nodes,
                             number_of_full_triangles=number_of_full_triangles,
                             geo_reference=georef,
                             number_of_global_triangles=number_of_global_triangles,
                             number_of_global_nodes=number_of_global_nodes,
                             s2p_map=s2p_map,
                             p2s_map=p2s_map, ## jj added this
                             tri_l2g=tri_l2g, ## SR added this
                             node_l2g=node_l2g)
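
    # Note (based on the changeset message for r8281): the global-numbering
    # data passed above (global triangle/node counts, the s2p/p2s maps and
    # the l2g maps) is what allows sww_merge to combine the per-processor
    # sww files into a single merged sww file.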

    #------------------------------------------------------------------------
    # Transfer initial conditions to each subdomain
    #------------------------------------------------------------------------
    for q in quantities:
        domain.set_quantity(q, quantities[q])


    #------------------------------------------------------------------------
    # Transfer boundary conditions to each subdomain
    #------------------------------------------------------------------------
    boundary_map['ghost'] = None  # Add binding to ghost boundary
    domain.set_boundary(boundary_map)


    #------------------------------------------------------------------------
    # Transfer other attributes to each subdomain
    #------------------------------------------------------------------------
    domain.set_name(domain_name)
    domain.set_datadir(domain_dir)
    domain.geo_reference = georef

    #------------------------------------------------------------------------
    # Return parallel domain to all nodes
    #------------------------------------------------------------------------
    return domain



def distribute_mesh(domain, verbose=False):
    """Partition the mesh of domain (built on processor 0) and send one
    submesh to each of the other processors.
    """

    numprocs = size()

    # Subdivide the mesh
    if verbose: print 'Subdivide mesh'
    nodes, triangles, boundary, triangles_per_proc, quantities, \
           s2p_map, p2s_map = \
           pmesh_divide_metis_with_map(domain, numprocs)

    #PETE: s2p_map (maps serial domain triangles to parallel domain triangles)
    #      p2s_map (maps parallel domain triangles to serial domain triangles)


    # Build the mesh that should be assigned to each processor;
    # this includes ghost nodes and the communication pattern
    if verbose: print 'Build submeshes'
    submesh = build_submesh(nodes, triangles, boundary,
                            quantities, triangles_per_proc)

    if verbose:
        for p in range(numprocs):
            N = len(submesh['ghost_nodes'][p])
            M = len(submesh['ghost_triangles'][p])
            print 'There are %d ghost nodes and %d ghost triangles on proc %d'\
                  %(N, M, p)


    # Send the mesh partition to the appropriate processor
    if verbose: print 'Distribute submeshes'
    for p in range(1, numprocs):
        send_submesh(submesh, triangles_per_proc, p, verbose)

    # Build the local mesh for processor 0
    points, vertices, boundary, quantities, \
            ghost_recv_dict, full_send_dict, tri_map, node_map =\
              extract_hostmesh(submesh, triangles_per_proc)

    # Keep track of the number of full nodes and triangles.
    # This is useful later if one needs access to a ghost-free domain.
    # Here, we do it for process 0. The others are done in rec_submesh.
    number_of_full_nodes = len(submesh['full_nodes'][0])
    number_of_full_triangles = len(submesh['full_triangles'][0])

    #print
    #for p in range(numprocs):
    #    print 'Process %d:' %(p)
    #
    #    print 'full_triangles:'
    #    print submesh['full_triangles'][p]
    #
    #    print 'full_nodes:'
    #    print submesh['full_nodes'][p]
    #
    #    print 'ghost_triangles:'
    #    print submesh['ghost_triangles'][p]
    #
    #    print 'ghost_nodes:'
    #    print submesh['ghost_nodes'][p]
    #    print
    #
    #print 'Receive dict'
    #print ghost_recv_dict
    #
    #print 'Send dict'
    #print full_send_dict


    # Return structures necessary for building the parallel domain
    return points, vertices, boundary, quantities,\
           ghost_recv_dict, full_send_dict,\
           number_of_full_nodes, number_of_full_triangles, \
           s2p_map, p2s_map, tri_map, node_map


def extract_l2g_map(map):
    # Build the local-to-global index map.
    # 'map' is a global-to-local map: map[global_id] is the local id, or -1
    # if that global id is not present on this processor.  The returned
    # array satisfies l2g[local_id] = global_id.

    import numpy as num

    b = num.arange(len(map))

    l_ids = num.extract(map>-1,map)
    g_ids = num.extract(map>-1,b)

#    print len(g_ids)
#    print len(l_ids)
#    print l_ids

    l2g = num.zeros_like(g_ids)
    l2g[l_ids] = g_ids

    return l2g

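
# Worked example for extract_l2g_map (illustrative only, not part of the
# original module):
#
#     map = num.array([0, -1, 1, 2, -1])
#     # global ids 0, 2 and 3 are held locally as local ids 0, 1 and 2
#     extract_l2g_map(map)   # -> array([0, 2, 3]), i.e. l2g[local] = global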