source: anuga_core/source/anuga_parallel/parallel_api.py @ 5437

Last change on this file since 5437 was 3928, checked in by ole, 17 years ago

Parallel domains now store only full triangles in sww files.
Still need to remove ghost nodes.

File size: 7.1 KB
Line 
1"""Trying to lump parallel stuff into simpler interface
2
3
4"""
5
6from Numeric import zeros
7
8# The abstract Python-MPI interface
9from anuga_parallel.parallel_abstraction import size, rank, get_processor_name
10from anuga_parallel.parallel_abstraction import finalize, send, receive
11from anuga_parallel.parallel_abstraction import pypar_available, barrier
12
13
14# ANUGA parallel engine (only load if pypar can)
15if pypar_available:
16    from anuga_parallel.build_commun  import send_submesh
17    from anuga_parallel.build_commun  import rec_submesh
18    from anuga_parallel.build_commun  import extract_hostmesh
19    from anuga_parallel.parallel_shallow_water import Parallel_Domain
20
21    # Mesh partitioning using Metis
22    from anuga_parallel.build_submesh import build_submesh
23    from anuga_parallel.pmesh_divide  import pmesh_divide_metis
24
25
26#------------------------------------------------------------------------------
27# Read in processor information
28#------------------------------------------------------------------------------
29
# Query the MPI layer (see parallel_abstraction) once at import time and
# cache this processor's rank, the total process count and the node name.
numprocs = size()
myid = rank()
processor_name = get_processor_name()
print 'I am processor %d of %d on node %s' %(myid, numprocs, processor_name)
34
35
36
37
def distribute(domain, verbose=False):
    """ Distribute the domain to all processes

    Processor 0 partitions the mesh (via distribute_mesh) and sends one
    submesh to every other processor; the other processors receive
    theirs through rec_submesh.  Each processor then builds and returns
    a Parallel_Domain covering its own partition, with the quantities,
    boundary map, name, data directory and geo_reference transferred
    from the domain held by processor 0.

    If pypar is not available the (serial) domain is returned unchanged.

    NOTE(review): the received boundary_map goes through pickling, so
    boundaries wrapping unpicklable functions (e.g. Time_boundary) are
    not supported - see FIXME below.
    """


    # FIXME: Dummy assignment (until boundaries are refactored to
    # be independent of domains and are only bound when applied).
    # Unbind all boundary objects from the serial domain before
    # distribution; the real bindings are re-applied further down
    # from boundary_map.
    bdmap = {}
    for tag in domain.get_boundary_tags():
        bdmap[tag] = None


    domain.set_boundary(bdmap)




    if not pypar_available: return domain # Bypass: run serially

    # For some obscure reason this communication must happen prior to
    # the more complex mesh distribution - Oh Well!
    if myid == 0:
        # Broadcast simple domain attributes to all other processors
        domain_name = domain.get_name()
        domain_dir = domain.get_datadir()
        georef = domain.geo_reference

        # FIXME - what other attributes need to be transferred?

        for p in range(1, numprocs):
            send((domain_name, domain_dir, georef), p)
    else:
        if verbose: print 'P%d: Receiving domain attributes' %(myid)

        domain_name, domain_dir, georef = receive(0)



    # Distribute boundary conditions
    # FIXME: This cannot handle e.g. Time_boundaries due to
    # difficulties pickling functions
    if myid == 0:
        boundary_map = domain.boundary_map
        for p in range(1, numprocs):
            send(boundary_map, p)
    else:
        if verbose: print 'P%d: Receiving boundary map' %(myid)

        boundary_map = receive(0)




    if myid == 0:
        # Partition and distribute mesh.
        # Structures returned are in the
        # correct form for the ANUGA data structure


        points, vertices, boundary, quantities,\
                ghost_recv_dict, full_send_dict,\
                number_of_full_nodes, number_of_full_triangles =\
                distribute_mesh(domain)


        if verbose: print 'Communication done'

    else:
        # Read in the mesh partition that belongs to this
        # processor
        if verbose: print 'P%d: Receiving submeshes' %(myid)
        points, vertices, boundary, quantities,\
                ghost_recv_dict, full_send_dict,\
                number_of_full_nodes, number_of_full_triangles =\
                rec_submesh(0)


    #------------------------------------------------------------------------
    # Build the domain for this processor using partition structures
    #------------------------------------------------------------------------

    domain = Parallel_Domain(points, vertices, boundary,
                             full_send_dict=full_send_dict,
                             ghost_recv_dict=ghost_recv_dict,
                             number_of_full_nodes=number_of_full_nodes,
                             number_of_full_triangles=number_of_full_triangles)

    #------------------------------------------------------------------------
    # Transfer initial conditions to each subdomain
    #------------------------------------------------------------------------
    for q in quantities:
        domain.set_quantity(q, quantities[q])


    #------------------------------------------------------------------------
    # Transfer boundary conditions to each subdomain
    #------------------------------------------------------------------------
    boundary_map['ghost'] = None  # Add binding to ghost boundary
    domain.set_boundary(boundary_map)


    #------------------------------------------------------------------------
    # Transfer other attributes to each subdomain
    #------------------------------------------------------------------------
    domain.set_name(domain_name)
    domain.set_datadir(domain_dir)
    domain.geo_reference = georef

    #------------------------------------------------------------------------
    # Return parallel domain to all nodes
    #------------------------------------------------------------------------
    return domain
149
150
151
152
153
154
155def distribute_mesh(domain):
156
157    numprocs = size()
158
159   
160    # Subdivide the mesh
161    print 'Subdivide mesh'
162    nodes, triangles, boundary, triangles_per_proc, quantities = \
163           pmesh_divide_metis(domain, numprocs)
164
165
166    # Build the mesh that should be assigned to each processor,
167    # this includes ghost nodes and the communication pattern
168    print 'Build submeshes'   
169    submesh = build_submesh(nodes, triangles, boundary,\
170                            quantities, triangles_per_proc)
171
172    for p in range(numprocs):
173        N = len(submesh['ghost_nodes'][p])               
174        M = len(submesh['ghost_triangles'][p])
175        print 'There are %d ghost nodes and %d ghost triangles on proc %d'\
176              %(N, M, p)
177
178
179    # Send the mesh partition to the appropriate processor
180    print 'Distribute submeshes'       
181    for p in range(1, numprocs):
182      send_submesh(submesh, triangles_per_proc, p)
183
184    # Build the local mesh for processor 0
185    points, vertices, boundary, quantities, ghost_recv_dict, full_send_dict =\
186              extract_hostmesh(submesh, triangles_per_proc)
187
188    # Keep track of the number full nodes and triangles.
189    # This is useful later if one needs access to a ghost-free domain
190    # Here, we do it for process 0. The others are done in rec_submesh.
191    number_of_full_nodes = len(submesh['full_nodes'][0])
192    number_of_full_triangles = len(submesh['full_triangles'][0])
193       
194    #print
195    #for p in range(numprocs):
196    #    print 'Process %d:' %(p)
197    #
198    #    print 'full_triangles:'
199    #    print submesh['full_triangles'][p]
200    #
201    #    print 'full_nodes:'
202    #    print submesh['full_nodes'][p]
203    #
204    #    print 'ghost_triangles:'
205    #    print submesh['ghost_triangles'][p]#
206    #
207    #    print 'ghost_nodes:'
208    #   print submesh['ghost_nodes'][p]                               
209    #    print
210    #
211    #print 'Receive dict'
212    #print ghost_recv_dict
213    #
214    #print 'Send dict'
215    #print full_send_dict       
216
217
218    # Return structures necessary for building the parallel domain
219    return points, vertices, boundary, quantities,\
220           ghost_recv_dict, full_send_dict,\
221           number_of_full_nodes, number_of_full_triangles
222   
223
224
225
Note: See TracBrowser for help on using the repository browser.