source: anuga_core/source/anuga_parallel/parallel_api.py @ 3635

Last change on this file since 3635 was 3635, checked in by ole, 17 years ago

Work on parallel api and Karratha study

File size: 5.0 KB
Line 
1"""Trying to lump parallel stuff into simpler interface
2
3
4"""
5
6# Parallelism
7# The Python-MPI interface
8from pypar import size, rank, get_processor_name, finalize, send, receive
9#from anuga_parallel.parallel_abstraction import size, rank, get_processor_name, finalize #,send, receive
10from anuga_parallel.pmesh_divide  import pmesh_divide_metis
11from anuga_parallel.build_submesh import build_submesh
12from anuga_parallel.build_local   import build_local_mesh
13from anuga_parallel.build_commun  import send_submesh, rec_submesh, extract_hostmesh
14from anuga_parallel.parallel_shallow_water import Parallel_Domain
15
16
17#------------------------------------------------------------------------------
18# Read in processor information
19#------------------------------------------------------------------------------
20
numprocs = size()                        # total number of MPI processes in this run
myid = rank()                            # rank (0..numprocs-1) of this process
processor_name = get_processor_name()    # host name, for diagnostics only
# Announce ourselves; runs once per process at import time.
print 'I am processor %d of %d on node %s' %(myid, numprocs, processor_name)
25
26
27
28
def distribute(domain, verbose=False):
    """Distribute the domain to all processes.

    Processor 0 partitions the (sequential) domain and sends each
    processor its piece together with the domain name, data directory
    and boundary map; every other processor receives its piece from
    processor 0.  Each processor then builds and returns its own
    Parallel_Domain.

    Parameters:
        domain  -- sequential ANUGA domain; only the copy held by
                   processor 0 is actually partitioned.
        verbose -- if True, print progress messages on each processor.

    Returns the Parallel_Domain belonging to the calling processor.

    Note: communication order matters -- the exchanges below must stay
    in this sequence so that matching send/receive pairs line up
    across processors.
    """

    # For some obscure reason this communication must happen prior to
    # the more complex mesh distribution - Oh Well!
    if myid == 0:
        domain_name = domain.get_name()
        domain_dir = domain.get_datadir()
        # FIXME - what other attributes need to be transferred?

        # Broadcast name/datadir by explicit point-to-point sends.
        for p in range(1, numprocs):
            send((domain_name, domain_dir), p)
    else:
        if verbose: print 'P%d: Receiving domain attributes' %(myid)

        domain_name, domain_dir = receive(0)



    # Distribute boundary conditions
    # FIXME: This cannot handle e.g. Time_boundaries due to
    # difficulties pickling functions
    if myid == 0:
        boundary_map = domain.boundary_map
        for p in range(1, numprocs):
            send(boundary_map, p)
    else:
        if verbose: print 'P%d: Receiving boundary map' %(myid)       

        boundary_map = receive(0)
       



    if myid == 0:
        # Partition and distribute mesh.
        # Structures returned is in the
        # correct form for the ANUGA data structure


        points, vertices, boundary, quantities,\
                ghost_recv_dict, full_send_dict,\
                = distribute_mesh(domain)

        if verbose: print 'Communication done'
       
    else:
        # Read in the mesh partition that belongs to this
        # processor
        if verbose: print 'P%d: Receiving submeshes' %(myid)               
        points, vertices, boundary, quantities,\
                ghost_recv_dict, full_send_dict,\
                = rec_submesh(0)


    #------------------------------------------------------------------------
    # Build the domain for this processor using partion structures
    #------------------------------------------------------------------------
    # Note: rebinds the local name 'domain' -- from here on it refers
    # to this processor's Parallel_Domain, not the sequential input.
    domain = Parallel_Domain(points, vertices, boundary,
                             full_send_dict  = full_send_dict,
                             ghost_recv_dict = ghost_recv_dict)

    #------------------------------------------------------------------------
    # Transfer initial conditions to each subdomain
    #------------------------------------------------------------------------
    for q in quantities:
        domain.set_quantity(q, quantities[q]) 


    #------------------------------------------------------------------------
    # Transfer boundary conditions to each subdomain
    #------------------------------------------------------------------------
    boundary_map['ghost'] = None  # Add binding to ghost boundary
    domain.set_boundary(boundary_map)


    #------------------------------------------------------------------------
    # Transfer other attributes to each subdomain
    #------------------------------------------------------------------------
    domain.set_name(domain_name)
    domain.set_datadir(domain_dir)       

    #------------------------------------------------------------------------
    # Return parallel domain to all nodes
    #------------------------------------------------------------------------
    return domain   
116
117
118
119
120
121
122
123def distribute_mesh(domain):
124
125    numprocs = size()
126
127   
128    # Subdivide the mesh
129    print 'Subdivide mesh'
130    nodes, triangles, boundary, triangles_per_proc, quantities = \
131           pmesh_divide_metis(domain, numprocs)
132
133    # Build the mesh that should be assigned to each processor,
134    # this includes ghost nodes and the communicaiton pattern
135    print 'Build submeshes'   
136    submesh = build_submesh(nodes, triangles, boundary,\
137                            quantities, triangles_per_proc)
138
139    # Send the mesh partition to the appropriate processor
140    print 'Distribute submeshes'       
141    for p in range(1, numprocs):
142      send_submesh(submesh, triangles_per_proc, p)
143
144    # Build the local mesh for processor 0
145    points, vertices, boundary, quantities, ghost_recv_dict, full_send_dict = \
146              extract_hostmesh(submesh, triangles_per_proc)
147
148    # Return structures necessary for building the parallel domain
149    return points, vertices, boundary, quantities, \
150           ghost_recv_dict, full_send_dict
151   
152
153
154
Note: See TracBrowser for help on using the repository browser.