source: trunk/anuga_core/source/anuga_parallel/sequential_distribute.py @ 8610

Last change: r8610, checked in by steve, 12 years ago

Added in some sequential distribute run scripts

1"""Trying to lump parallel stuff into simpler interface
2
3
4"""

import numpy as num

from anuga_parallel.distribute_mesh import send_submesh
from anuga_parallel.distribute_mesh import rec_submesh
from anuga_parallel.distribute_mesh import extract_submesh

# Mesh partitioning using Metis
from anuga_parallel.distribute_mesh import build_submesh
from anuga_parallel.distribute_mesh import pmesh_divide_metis_with_map

from anuga_parallel.parallel_shallow_water import Parallel_domain


def sequential_distribute_dump(domain, numprocs=1, verbose=False, debug=False, parameters=None):
    """Subdivide the domain and pickle, for each of the numprocs processors,
    the data needed to build its parallel domain later.
    """


    if debug:
        verbose = True


    # FIXME: Dummy assignment (needed until boundaries are refactored so that
    # they are independent of domains until they are applied)
    bdmap = {}
    for tag in domain.get_boundary_tags():
        bdmap[tag] = None

    domain.set_boundary(bdmap)


    if numprocs == 1: return  # Bypass


    domain_name = domain.get_name()
    domain_dir = domain.get_datadir()
    domain_store = domain.get_store()
    domain_minimum_storable_height = domain.minimum_storable_height
    domain_flow_algorithm = domain.get_flow_algorithm()
    domain_minimum_allowed_height = domain.get_minimum_allowed_height()
    georef = domain.geo_reference
    number_of_global_triangles = domain.number_of_triangles
    number_of_global_nodes = domain.number_of_nodes
    boundary_map = domain.boundary_map


    #sequential_distribute_mesh(domain, numprocs, verbose=verbose, debug=debug, parameters=parameters)


    # Subdivide the mesh
    if verbose: print 'sequential_distribute: Subdivide mesh'
    new_nodes, new_triangles, new_boundary, triangles_per_proc, quantities, \
           s2p_map, p2s_map = \
           pmesh_divide_metis_with_map(domain, numprocs)

    # PETE: s2p_map (maps serial domain triangles to parallel domain triangles)
    #       p2s_map (maps parallel domain triangles to serial domain triangles)



    # Build the mesh that should be assigned to each processor;
    # this includes ghost nodes and the communication pattern.
    if verbose: print 'sequential_distribute: Build submeshes'
    submesh = build_submesh(new_nodes, new_triangles, new_boundary, quantities, triangles_per_proc, parameters)

    if debug:
        for p in range(numprocs):
            N = len(submesh['ghost_nodes'][p])
            M = len(submesh['ghost_triangles'][p])
            print 'There are %d ghost nodes and %d ghost triangles on proc %d'\
                  %(N, M, p)

    #if debug:
    #    from pprint import pprint
    #    pprint(submesh)


    # Extract data to create parallel domain
    if verbose: print 'sequential_distribute: Distribute submeshes'
    for p in range(0, numprocs):

        # Build the local mesh for processor p
        points, vertices, boundary, quantities, \
            ghost_recv_dict, full_send_dict, tri_map, node_map, ghost_layer_width = \
              extract_submesh(submesh, triangles_per_proc, p)


#        from pprint import pprint
#        print '='*80
#        print p
#        print '='*80
#        pprint(tri_map)
#        print len(tri_map)

        # Keep track of the number of full (non-ghost) nodes and triangles.
        # This is useful later if one needs access to a ghost-free domain.
        # Here it is done for every processor p, since the whole subdivision
        # happens sequentially.
        number_of_full_nodes = len(submesh['full_nodes'][p])
        number_of_full_triangles = len(submesh['full_triangles'][p])

        # Extract l2g maps
        tri_l2g  = extract_l2g_map(tri_map)
        node_l2g = extract_l2g_map(node_map)


        s2p_map = None
        p2s_map = None

        #------------------------------------------------------------------------
        # Build the parallel domain for this processor using partition structures
        #------------------------------------------------------------------------

        if verbose:
            print 'sequential_distribute: P%g, no_full_nodes = %g, no_full_triangles = %g' % (p, number_of_full_nodes, number_of_full_triangles)


        #args = [points, vertices, boundary]

        kwargs = {'full_send_dict': full_send_dict,
                  'ghost_recv_dict': ghost_recv_dict,
                  'number_of_full_nodes': number_of_full_nodes,
                  'number_of_full_triangles': number_of_full_triangles,
                  'geo_reference': georef,
                  'number_of_global_triangles': number_of_global_triangles,
                  'number_of_global_nodes': number_of_global_nodes,
                  'processor': p,
                  'numproc': numprocs,
                  's2p_map': s2p_map,
                  'p2s_map': p2s_map,    ## jj added this
                  'tri_l2g': tri_l2g,    ## SR added this
                  'node_l2g': node_l2g,
                  'ghost_layer_width': ghost_layer_width}

#        parallel_domain = Parallel_domain(points, vertices, boundary, **kwargs)



        #------------------------------------------------------------------------
        # Transfer initial conditions to each subdomain
        #------------------------------------------------------------------------
#        for q in quantities:
#            parallel_domain.set_quantity(q, quantities[q])


        #------------------------------------------------------------------------
        # Transfer boundary conditions to each subdomain
        #------------------------------------------------------------------------
#        boundary_map['ghost'] = None  # Add binding to ghost boundary
#        parallel_domain.set_boundary(boundary_map)


        #------------------------------------------------------------------------
        # Transfer other attributes to each subdomain
        #------------------------------------------------------------------------
#        parallel_domain.set_name(domain_name)
#        parallel_domain.set_datadir(domain_dir)
#        parallel_domain.set_store(domain_store)
#        parallel_domain.set_minimum_storable_height(domain_minimum_storable_height)
#        parallel_domain.set_minimum_allowed_height(domain_minimum_allowed_height)
#        parallel_domain.set_flow_algorithm(domain_flow_algorithm)
#        parallel_domain.geo_reference = georef



        #-----------------------------------------------------------------------
        # Now let's store the parallel_domain via cPickle
        #-----------------------------------------------------------------------
#        import cPickle
#        pickle_name = domain_name + '_P%g_%g.pickle'% (numprocs,p)
#        f = file(pickle_name, 'wb')
#        cPickle.dump(parallel_domain, f, protocol=cPickle.HIGHEST_PROTOCOL)
#        f.close()


        #FIXME SR: Looks like we could reduce storage by a factor of 4 by just
        # storing the data to create the parallel_domain instead of pickling
        # a created domain
        import cPickle
        pickle_name = domain_name + '_P%g_%g.pickle' % (numprocs, p)
        f = open(pickle_name, 'wb')
        tostore = (kwargs, points, vertices, boundary, quantities, boundary_map,
                   domain_name, domain_dir, domain_store,
                   domain_minimum_storable_height,
                   domain_minimum_allowed_height, domain_flow_algorithm, georef)
        cPickle.dump(tostore, f, protocol=cPickle.HIGHEST_PROTOCOL)
        f.close()

    return


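# A minimal usage sketch of the dump phase (the domain construction and the
# name 'merimbula' below are hypothetical; any sequential ANUGA domain works).
# Run as an ordinary serial script:
#
#     domain = ...                      # build the sequential domain as usual
#     domain.set_name('merimbula')
#     sequential_distribute_dump(domain, numprocs=4, verbose=True)
#
# This writes one pickle file per target processor into the current working
# directory, e.g. merimbula_P4_0.pickle ... merimbula_P4_3.pickle.
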
def sequential_distribute_load(filename='domain', verbose=False):
    """Load the pickled sub-domain data for this processor (myid) and
    build the corresponding parallel domain.
    """

    from anuga_parallel import myid, numprocs


    #---------------------------------------------------------------------------
    # Open pickle files
    #---------------------------------------------------------------------------
    import cPickle
    pickle_name = filename + '_P%g_%g.pickle' % (numprocs, myid)
    f = open(pickle_name, 'rb')
    kwargs, points, vertices, boundary, quantities, boundary_map, \
        domain_name, domain_dir, domain_store, domain_minimum_storable_height, \
        domain_minimum_allowed_height, domain_flow_algorithm, georef = cPickle.load(f)
    f.close()

    #---------------------------------------------------------------------------
    # Create parallel domain
    #---------------------------------------------------------------------------
    parallel_domain = Parallel_domain(points, vertices, boundary, **kwargs)


    #------------------------------------------------------------------------
    # Copy in quantity data
    #------------------------------------------------------------------------
    for q in quantities:
        parallel_domain.set_quantity(q, quantities[q])


    #------------------------------------------------------------------------
    # Transfer boundary conditions to each subdomain
    #------------------------------------------------------------------------
    boundary_map['ghost'] = None  # Add binding to ghost boundary
    parallel_domain.set_boundary(boundary_map)


    #------------------------------------------------------------------------
    # Transfer other attributes to each subdomain
    #------------------------------------------------------------------------
    parallel_domain.set_name(domain_name)
    parallel_domain.set_datadir(domain_dir)
    parallel_domain.set_store(domain_store)
    parallel_domain.set_minimum_storable_height(domain_minimum_storable_height)
    parallel_domain.set_minimum_allowed_height(domain_minimum_allowed_height)
    parallel_domain.set_flow_algorithm(domain_flow_algorithm)
    parallel_domain.geo_reference = georef


    return parallel_domain

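# A minimal usage sketch of the load phase (script and file names are
# hypothetical).  Run under MPI with the same number of processes that was
# used for the dump, e.g. "mpirun -np 4 python run_parallel.py":
#
#     domain = sequential_distribute_load(filename='merimbula', verbose=True)
#     # Re-apply real boundary conditions and operators as needed, then
#     # evolve the parallel domain as usual.
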
def extract_l2g_map(map):
    # Extract a local-to-global (l2g) id map from the given global-to-local
    # map, in which an entry of -1 marks a global id with no local counterpart.

    import numpy as num

    b = num.arange(len(map))

    l_ids = num.extract(map > -1, map)
    g_ids = num.extract(map > -1, b)


#    print len(g_ids)
#    print len(l_ids)
#    print l_ids
#    print g_ids

    l2g = num.zeros_like(g_ids)
    l2g[l_ids] = g_ids

    return l2g
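
# A small worked example (illustrative values only): for a global-to-local
# triangle map
#
#     tri_map = num.array([1, -1, 0, 2])   # index = global id, value = local id
#
# extract_l2g_map(tri_map) gives array([2, 0, 3]), i.e. local triangle 0 is
# global triangle 2, local 1 is global 0 and local 2 is global 3.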