source: trunk/anuga_core/source/anuga_parallel/parallel_api.py @ 8281

Last change on this file since 8281 was 8281, checked in by steve, 12 years ago

added in some parallel data to allow sww_merge to create a merged sww file

File size: 9.2 KB
[3584]1"""Trying to lump parallel stuff into simpler interface
2
[3585]3
[3584]4"""
[3585]5
[3776]6
[7400]7
[3776]8# The abstract Python-MPI interface
9from anuga_parallel.parallel_abstraction import size, rank, get_processor_name
10from anuga_parallel.parallel_abstraction import finalize, send, receive
[3904]11from anuga_parallel.parallel_abstraction import pypar_available, barrier
[3776]12
[3585]13
# ANUGA parallel engine (only load if pypar is available)
if pypar_available:
    from anuga_parallel.distribute_mesh import send_submesh
    from anuga_parallel.distribute_mesh import rec_submesh
    from anuga_parallel.distribute_mesh import extract_hostmesh

    # Mesh partitioning using Metis
    from anuga_parallel.distribute_mesh import build_submesh
    from anuga_parallel.distribute_mesh import pmesh_divide_metis_with_map

    from anuga_parallel.parallel_shallow_water import Parallel_domain

#------------------------------------------------------------------------------
# Read in processor information
#------------------------------------------------------------------------------

numprocs = size()
myid = rank()
processor_name = get_processor_name()
#print 'I am processor %d of %d on node %s' %(myid, numprocs, processor_name)
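
# A minimal usage sketch (illustrative only, not executed here; the exact
# pattern may differ between ANUGA versions).  A parallel run script would
# typically build the ordinary sequential domain, pass it to distribute()
# below and then evolve the returned subdomain on every process.
# create_sequential_domain() is a hypothetical placeholder for whatever code
# builds the sequential domain.
#
#     from anuga_parallel.parallel_api import distribute, myid, finalize
#
#     domain = create_sequential_domain()        # hypothetical helper
#     domain = distribute(domain, verbose=True)  # each process gets its subdomain
#
#     for t in domain.evolve(yieldstep=1.0, finaltime=10.0):
#         if myid == 0:
#             domain.write_time()
#
#     finalize()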


def distribute(domain, verbose=False):
    """Distribute the domain to all processes.

    Returns the parallel (sub)domain belonging to this process.
    """

    barrier()

    # FIXME: Dummy assignment (until boundaries are refactored to
    # be independent of domains until they are applied)
    if myid == 0:
        bdmap = {}
        for tag in domain.get_boundary_tags():
            bdmap[tag] = None

        domain.set_boundary(bdmap)

    if not pypar_available: return domain  # Bypass

    # For some obscure reason this communication must happen prior to
    # the more complex mesh distribution - Oh Well!
    if myid == 0:
        domain_name = domain.get_name()
        domain_dir = domain.get_datadir()
        georef = domain.geo_reference

        # FIXME - what other attributes need to be transferred?

        for p in range(1, numprocs):
            send((domain_name, domain_dir, georef), p)
    else:
        if verbose: print 'P%d: Receiving domain attributes' %(myid)

        domain_name, domain_dir, georef = receive(0)

    # Distribute boundary conditions
    # FIXME: This cannot handle e.g. Time_boundaries due to
    # difficulties pickling functions
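    # A minimal illustration of the pickling limitation noted above (a sketch,
    # not executed here): boundaries that wrap a user-supplied lambda or
    # closure cannot be sent this way, because such functions are not
    # picklable with the standard pickle module:
    #
    #     import pickle
    #     f = lambda t: [0.0, 0.0, 0.0]
    #     pickle.dumps(f)    # raises pickle.PicklingError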
    if myid == 0:
        boundary_map = domain.boundary_map
        for p in range(1, numprocs):
            send(boundary_map, p)
    else:
        if verbose: print 'P%d: Receiving boundary map' %(myid)

        boundary_map = receive(0)

    if myid == 0:
        # Partition and distribute the mesh.
        # The structures returned are in the correct form
        # for the ANUGA data structure.

        points, vertices, boundary, quantities,\
                ghost_recv_dict, full_send_dict,\
                number_of_full_nodes, number_of_full_triangles,\
                s2p_map, p2s_map, tri_map, node_map =\
                distribute_mesh(domain, verbose=verbose)

        number_of_global_triangles = len(tri_map)
        number_of_global_nodes = len(node_map)

        # Extract l2g maps
        tri_l2g = extract_l2g_map(tri_map)
        node_l2g = extract_l2g_map(node_map)

        # Send serial to parallel (s2p) and parallel to serial (p2s)
        # triangle mappings to processors 1 .. numprocs-1
        for p in range(1, numprocs):
            send(s2p_map, p)
            send(p2s_map, p)

        if verbose: print 'Communication done'

    else:
        # Read in the mesh partition that belongs to this
        # processor
        if verbose: print 'P%d: Receiving submeshes' %(myid)
        points, vertices, boundary, quantities,\
                ghost_recv_dict, full_send_dict,\
                number_of_full_nodes, number_of_full_triangles,\
                tri_map, node_map =\
                rec_submesh(0, verbose)

        number_of_global_triangles = len(tri_map)
        number_of_global_nodes = len(node_map)

        # Extract l2g maps
        tri_l2g = extract_l2g_map(tri_map)
        node_l2g = extract_l2g_map(node_map)

        # Receive serial to parallel (s2p) and parallel to serial (p2s)
        # triangle mappings
        s2p_map = receive(0)
        p2s_map = receive(0)

    #------------------------------------------------------------------------
    # Build the domain for this processor using partition structures
    #------------------------------------------------------------------------

    if verbose:
        print 'myid = %g, no_full_nodes = %g, no_full_triangles = %g' \
              % (myid, number_of_full_nodes, number_of_full_triangles)

    domain = Parallel_domain(points, vertices, boundary,
                             full_send_dict=full_send_dict,
                             ghost_recv_dict=ghost_recv_dict,
                             number_of_full_nodes=number_of_full_nodes,
                             number_of_full_triangles=number_of_full_triangles,
                             geo_reference=georef,
                             number_of_global_triangles=number_of_global_triangles,
                             number_of_global_nodes=number_of_global_nodes,
                             s2p_map=s2p_map,
                             p2s_map=p2s_map,
                             tri_l2g=tri_l2g,
                             node_l2g=node_l2g)
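    # The global/serial mapping data passed above (added in changeset r8281,
    # per the change message) is what gives sww_merge enough information to
    # combine the per-processor sww files into a single merged sww file.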

    #------------------------------------------------------------------------
    # Transfer initial conditions to each subdomain
    #------------------------------------------------------------------------
    for q in quantities:
        domain.set_quantity(q, quantities[q])


    #------------------------------------------------------------------------
    # Transfer boundary conditions to each subdomain
    #------------------------------------------------------------------------
    boundary_map['ghost'] = None  # Add binding to ghost boundary
    domain.set_boundary(boundary_map)


    #------------------------------------------------------------------------
    # Transfer other attributes to each subdomain
    #------------------------------------------------------------------------
    domain.set_name(domain_name)
    domain.set_datadir(domain_dir)
    domain.geo_reference = georef

    #------------------------------------------------------------------------
    # Return parallel domain to all nodes
    #------------------------------------------------------------------------
    return domain


def distribute_mesh(domain, verbose=False):

    numprocs = size()

    # Subdivide the mesh
    if verbose: print 'Subdivide mesh'
    nodes, triangles, boundary, triangles_per_proc, quantities, \
           s2p_map, p2s_map = \
           pmesh_divide_metis_with_map(domain, numprocs)

    # PETE: s2p_map maps serial domain triangles to parallel domain triangles
    #       p2s_map maps parallel domain triangles to serial domain triangles

    # Build the mesh that should be assigned to each processor;
    # this includes ghost nodes and the communication pattern.
    if verbose: print 'Build submeshes'
    submesh = build_submesh(nodes, triangles, boundary,
                            quantities, triangles_per_proc)
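    # Note: submesh is a dictionary of per-processor sequences; the keys used
    # in this module are 'full_nodes', 'full_triangles', 'ghost_nodes' and
    # 'ghost_triangles' (see the ghost counts reported below and the
    # number_of_full_* bookkeeping for processor 0 further down).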

    if verbose:
        for p in range(numprocs):
            N = len(submesh['ghost_nodes'][p])
            M = len(submesh['ghost_triangles'][p])
            print 'There are %d ghost nodes and %d ghost triangles on proc %d'\
                  %(N, M, p)


    # Send the mesh partition to the appropriate processor
    if verbose: print 'Distribute submeshes'
    for p in range(1, numprocs):
        send_submesh(submesh, triangles_per_proc, p, verbose)

    # Build the local mesh for processor 0
    points, vertices, boundary, quantities, \
            ghost_recv_dict, full_send_dict, tri_map, node_map = \
            extract_hostmesh(submesh, triangles_per_proc)

    # Keep track of the number of full nodes and triangles.
    # This is useful later if one needs access to a ghost-free domain.
    # Here, we do it for process 0. The others are done in rec_submesh.
    number_of_full_nodes = len(submesh['full_nodes'][0])
    number_of_full_triangles = len(submesh['full_triangles'][0])

    #print
    #for p in range(numprocs):
    #    print 'Process %d:' %(p)
    #
    #    print 'full_triangles:'
    #    print submesh['full_triangles'][p]
    #
    #    print 'full_nodes:'
    #    print submesh['full_nodes'][p]
    #
    #    print 'ghost_triangles:'
    #    print submesh['ghost_triangles'][p]
    #
    #    print 'ghost_nodes:'
    #    print submesh['ghost_nodes'][p]
    #    print
    #
    #print 'Receive dict'
    #print ghost_recv_dict
    #
    #print 'Send dict'
    #print full_send_dict

    # Return structures necessary for building the parallel domain
    return points, vertices, boundary, quantities,\
           ghost_recv_dict, full_send_dict,\
           number_of_full_nodes, number_of_full_triangles,\
           s2p_map, p2s_map, tri_map, node_map


def extract_l2g_map(map):
    # Invert a global-to-local index map (map[global_id] = local_id, with -1
    # meaning "not on this processor") into a local-to-global array l2g,
    # so that l2g[local_id] = global_id.
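    # Worked example (illustrative numbers only): for the global-to-local map
    #     map = [3, -1, 0, 1, -1, 2]
    # global ids 0, 2, 3 and 5 live on this processor as local ids 3, 0, 1
    # and 2 respectively, so the returned array is
    #     l2g = [2, 3, 5, 0]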

    import numpy as num

    b = num.arange(len(map))

    l_ids = num.extract(map > -1, map)
    g_ids = num.extract(map > -1, b)

#    print len(g_ids)
#    print len(l_ids)
#    print l_ids

    l2g = num.zeros_like(g_ids)
    l2g[l_ids] = g_ids

    return l2g