source: anuga_core/source/anuga_parallel/parallel_api.py @ 3884

Last change on this file since 3884 was 3884, checked in by ole, 17 years ago

Allowed parallel abstraction to work sequentially without Metis

File size: 5.6 KB
Line 
1"""Trying to lump parallel stuff into simpler interface
2
3
4"""
5
6# Parallelism
7
8# The abstract Python-MPI interface
9from anuga_parallel.parallel_abstraction import size, rank, get_processor_name
10from anuga_parallel.parallel_abstraction import finalize, send, receive
11from anuga_parallel.parallel_abstraction import pypar_available
12
13
14# ANUGA parallel engine (only load if pypar can)
15if pypar_available:
16    from anuga_parallel.build_commun  import send_submesh
17    from anuga_parallel.build_commun  import rec_submesh
18    from anuga_parallel.build_commun  import extract_hostmesh
19    from anuga_parallel.parallel_shallow_water import Parallel_Domain
20
21    # Mesh partitioning using Metis
22    from anuga_parallel.build_submesh import build_submesh
23    from anuga_parallel.build_local   import build_local_mesh
24    from anuga_parallel.pmesh_divide  import pmesh_divide_metis
25
26
27#------------------------------------------------------------------------------
28# Read in processor information
29#------------------------------------------------------------------------------
30
# Cache this process' rank, the total number of processes and the host
# name once at import time; distribute() below relies on these values.
numprocs = size()
myid = rank()
processor_name = get_processor_name()
# Unconditional startup banner so every process identifies itself
print 'I am processor %d of %d on node %s' %(myid, numprocs, processor_name)
35
36
37
38
def distribute(domain, verbose=False):
    """Distribute the sequential domain across all MPI processes.

    Every processor must call this with its own (identically constructed)
    sequential domain.  Processor 0 partitions the mesh and sends one
    submesh to each other processor (via distribute_mesh); the other
    processors receive their submesh from processor 0 (via rec_submesh).

    domain  -- sequential Domain instance (same on every processor)
    verbose -- if True, print progress messages tagged with this rank

    Returns a new Parallel_Domain covering this processor's partition,
    with quantities, boundary map, name and data directory transferred
    from the original domain.  If pypar is not available the original
    domain is returned unchanged (sequential bypass).
    """


    # FIXME: Dummy assignment (until boundaries are refactored to
    # be independent of domains until they are applied)
    # NOTE(review): all tags are mapped to None here; the actual boundary
    # objects are reattached to the parallel domain via boundary_map below.
    bdmap = {}
    for tag in domain.get_boundary_tags():
        bdmap[tag] = None


    domain.set_boundary(bdmap)


    # Without pypar there is nothing to distribute - run sequentially
    if not pypar_available: return domain # Bypass

    # Broadcast the domain's name and data directory from processor 0.
    # For some obscure reason this communication must happen prior to
    # the more complex mesh distribution - Oh Well!
    if myid == 0:
        domain_name = domain.get_name()
        domain_dir = domain.get_datadir()
        # FIXME - what other attributes need to be transferred?

        for p in range(1, numprocs):
            send((domain_name, domain_dir), p)
    else:
        if verbose: print 'P%d: Receiving domain attributes' %(myid)

        domain_name, domain_dir = receive(0)



    # Distribute boundary conditions
    # FIXME: This cannot handle e.g. Time_boundaries due to
    # difficulties pickling functions
    if myid == 0:
        boundary_map = domain.boundary_map
        for p in range(1, numprocs):
            send(boundary_map, p)
    else:
        if verbose: print 'P%d: Receiving boundary map' %(myid)

        boundary_map = receive(0)




    if myid == 0:
        # Partition and distribute mesh.
        # Structures returned is in the
        # correct form for the ANUGA data structure


        points, vertices, boundary, quantities,\
                ghost_recv_dict, full_send_dict,\
                = distribute_mesh(domain)

        if verbose: print 'Communication done'

    else:
        # Read in the mesh partition that belongs to this
        # processor
        if verbose: print 'P%d: Receiving submeshes' %(myid)
        points, vertices, boundary, quantities,\
                ghost_recv_dict, full_send_dict,\
                = rec_submesh(0)


    #------------------------------------------------------------------------
    # Build the domain for this processor using partition structures
    # (the sequential domain passed in is discarded from here on)
    #------------------------------------------------------------------------
    domain = Parallel_Domain(points, vertices, boundary,
                             full_send_dict  = full_send_dict,
                             ghost_recv_dict = ghost_recv_dict)

    #------------------------------------------------------------------------
    # Transfer initial conditions to each subdomain
    #------------------------------------------------------------------------
    for q in quantities:
        domain.set_quantity(q, quantities[q])


    #------------------------------------------------------------------------
    # Transfer boundary conditions to each subdomain
    #------------------------------------------------------------------------
    boundary_map['ghost'] = None  # Add binding to ghost boundary
    domain.set_boundary(boundary_map)


    #------------------------------------------------------------------------
    # Transfer other attributes to each subdomain
    #------------------------------------------------------------------------
    domain.set_name(domain_name)
    domain.set_datadir(domain_dir)

    #------------------------------------------------------------------------
    # Return parallel domain to all nodes
    #------------------------------------------------------------------------
    return domain
141
142
143
144
145
146
147
148def distribute_mesh(domain):
149
150    numprocs = size()
151
152   
153    # Subdivide the mesh
154    print 'Subdivide mesh'
155    nodes, triangles, boundary, triangles_per_proc, quantities = \
156           pmesh_divide_metis(domain, numprocs)
157
158
159    # Build the mesh that should be assigned to each processor,
160    # this includes ghost nodes and the communication pattern
161    print 'Build submeshes'   
162    submesh = build_submesh(nodes, triangles, boundary,\
163                            quantities, triangles_per_proc)
164
165    # Send the mesh partition to the appropriate processor
166    print 'Distribute submeshes'       
167    for p in range(1, numprocs):
168      send_submesh(submesh, triangles_per_proc, p)
169
170    # Build the local mesh for processor 0
171    points, vertices, boundary, quantities, ghost_recv_dict, full_send_dict = \
172              extract_hostmesh(submesh, triangles_per_proc)
173
174    # Return structures necessary for building the parallel domain
175    return points, vertices, boundary, quantities, \
176           ghost_recv_dict, full_send_dict
177   
178
179
180
Note: See TracBrowser for help on using the repository browser.