source: trunk/anuga_core/source/anuga_parallel/sequential_distribute.py @ 8856

Last change on this file since 8856 was 8680, checked in by steve, 12 years ago

Some commits to sequential distribute

File size: 8.0 KB
1"""Trying to lump parallel stuff into simpler interface
2
3
4"""
5
import numpy as num

from anuga_parallel.distribute_mesh import send_submesh
from anuga_parallel.distribute_mesh import rec_submesh
from anuga_parallel.distribute_mesh import extract_submesh

# Mesh partitioning using Metis
from anuga_parallel.distribute_mesh import build_submesh
from anuga_parallel.distribute_mesh import pmesh_divide_metis_with_map

from anuga_parallel.parallel_shallow_water import Parallel_domain


def sequential_distribute_dump(domain, numprocs=1, verbose=False, debug=False, parameters=None):
    """Partition the domain and pickle the data needed to create a
    parallel domain for each of the numprocs processors.
    """

    if debug:
        verbose = True

    # FIXME: Dummy assignment (needed until boundary objects are refactored
    # so they do not depend on a domain until they are actually applied)
    bdmap = {}
    for tag in domain.get_boundary_tags():
        bdmap[tag] = None

    domain.set_boundary(bdmap)

    domain_name = domain.get_name()
    domain_dir = domain.get_datadir()
    domain_store = domain.get_store()
    domain_minimum_storable_height = domain.minimum_storable_height
    domain_flow_algorithm = domain.get_flow_algorithm()
    domain_minimum_allowed_height = domain.get_minimum_allowed_height()
    georef = domain.geo_reference
    number_of_global_triangles = domain.number_of_triangles
    number_of_global_nodes = domain.number_of_nodes
    boundary_map = domain.boundary_map

    #sequential_distribute_mesh(domain, numprocs, verbose=verbose, debug=debug, parameters=parameters)

    # Subdivide the mesh
    if verbose: print 'sequential_distribute: Subdivide mesh'
    new_nodes, new_triangles, new_boundary, triangles_per_proc, quantities, \
           s2p_map, p2s_map = \
           pmesh_divide_metis_with_map(domain, numprocs)

    # PETE: s2p_map maps serial domain triangles to parallel domain triangles
    #       p2s_map maps parallel domain triangles to serial domain triangles

    # Build the mesh that should be assigned to each processor.
    # This includes ghost nodes and the communication pattern.
    if verbose: print 'sequential_distribute: Build submeshes'
    submesh = build_submesh(new_nodes, new_triangles, new_boundary, quantities, triangles_per_proc, parameters)

    if debug:
        for p in range(numprocs):
            N = len(submesh['ghost_nodes'][p])
            M = len(submesh['ghost_triangles'][p])
            print 'There are %d ghost nodes and %d ghost triangles on proc %d'\
                  %(N, M, p)

    #if debug:
    #    from pprint import pprint
    #    pprint(submesh)

    # Extract data to create the parallel domain for each processor
    if verbose: print 'sequential_distribute: Distribute submeshes'
    for p in range(0, numprocs):

        # Build the local mesh for processor p
        points, vertices, boundary, quantities, \
            ghost_recv_dict, full_send_dict, tri_map, node_map, ghost_layer_width =\
              extract_submesh(submesh, triangles_per_proc, p)

#        from pprint import pprint
#        print '='*80
#        print p
#        print '='*80
#        pprint(tri_map)
#        print len(tri_map)

        # Keep track of the number of full nodes and triangles.
        # This is useful later if one needs access to a ghost-free domain.
        # Here we record them for each processor p in turn.
        number_of_full_nodes = len(submesh['full_nodes'][p])
        number_of_full_triangles = len(submesh['full_triangles'][p])

        # Extract l2g maps
        tri_l2g  = extract_l2g_map(tri_map)
        node_l2g = extract_l2g_map(node_map)

        s2p_map = None
        p2s_map = None

        #------------------------------------------------------------------------
        # Build the parallel domain for this processor using partition structures
        #------------------------------------------------------------------------

        if verbose:
            print 'sequential_distribute: P%g, no_full_nodes = %g, no_full_triangles = %g' % (p, number_of_full_nodes, number_of_full_triangles)

        #args = [points, vertices, boundary]

        kwargs = {'full_send_dict': full_send_dict,
                  'ghost_recv_dict': ghost_recv_dict,
                  'number_of_full_nodes': number_of_full_nodes,
                  'number_of_full_triangles': number_of_full_triangles,
                  'geo_reference': georef,
                  'number_of_global_triangles': number_of_global_triangles,
                  'number_of_global_nodes': number_of_global_nodes,
                  'processor': p,
                  'numproc': numprocs,
                  's2p_map': s2p_map,
                  'p2s_map': p2s_map,    ## jj added this
                  'tri_l2g': tri_l2g,    ## SR added this
                  'node_l2g': node_l2g,
                  'ghost_layer_width': ghost_layer_width}

        #-----------------------------------------------------------------------
        # Now store the data for the parallel_domain via cPickle
        #-----------------------------------------------------------------------

        # Storing just the data needed to create the parallel_domain
        # (instead of pickling a fully created domain) appears to reduce
        # storage by roughly a factor of 4.
        import cPickle
        pickle_name = domain_name + '_P%g_%g.pickle' % (numprocs, p)
        f = file(pickle_name, 'wb')
        tostore = (kwargs, points, vertices, boundary, quantities, boundary_map, domain_name, domain_dir, domain_store, domain_minimum_storable_height, \
                   domain_minimum_allowed_height, domain_flow_algorithm, georef)
        cPickle.dump(tostore, f, protocol=cPickle.HIGHEST_PROTOCOL)
        f.close()

    return


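# A usage sketch (not part of the original module; names are hypothetical):
# the dump step is typically run on a single processor once the sequential
# domain has been built.
#
#     domain = ...                      # a sequential ANUGA domain, built as usual
#     domain.set_name('my_domain')
#     sequential_distribute_dump(domain, numprocs=4, verbose=True)
#
# With numprocs=4 this writes my_domain_P4_0.pickle ... my_domain_P4_3.pickle,
# following the '_P%g_%g.pickle' naming used above.
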
def sequential_distribute_load(filename='domain', verbose=False):
    """Load the pickled partition data for this processor and build the
    corresponding Parallel_domain.
    """

    from anuga_parallel import myid, numprocs

    #---------------------------------------------------------------------------
    # Open the pickle file for this processor
    #---------------------------------------------------------------------------
    import cPickle
    pickle_name = filename + '_P%g_%g.pickle' % (numprocs, myid)
    f = file(pickle_name, 'rb')
    kwargs, points, vertices, boundary, quantities, boundary_map, domain_name, domain_dir, domain_store, domain_minimum_storable_height, \
                   domain_minimum_allowed_height, domain_flow_algorithm, georef = cPickle.load(f)
    f.close()

    #---------------------------------------------------------------------------
    # Create parallel domain
    #---------------------------------------------------------------------------
    parallel_domain = Parallel_domain(points, vertices, boundary, **kwargs)

    #------------------------------------------------------------------------
    # Copy in quantity data
    #------------------------------------------------------------------------
    for q in quantities:
        parallel_domain.set_quantity(q, quantities[q])

    #------------------------------------------------------------------------
    # Transfer boundary conditions to each subdomain
    #------------------------------------------------------------------------
    boundary_map['ghost'] = None  # Add binding to ghost boundary
    parallel_domain.set_boundary(boundary_map)

    #------------------------------------------------------------------------
    # Transfer other attributes to each subdomain
    #------------------------------------------------------------------------
    parallel_domain.set_name(domain_name)
    parallel_domain.set_datadir(domain_dir)
    parallel_domain.set_store(domain_store)
    parallel_domain.set_minimum_storable_height(domain_minimum_storable_height)
    parallel_domain.set_minimum_allowed_height(domain_minimum_allowed_height)
    parallel_domain.set_flow_algorithm(domain_flow_algorithm)
    parallel_domain.geo_reference = georef

    return parallel_domain

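# A usage sketch (not part of the original module): the load step runs under
# MPI with the same number of processes used for the dump, e.g.
# "mpirun -np 4 python run_model.py".  The filename and the evolve
# parameters below are hypothetical.
#
#     domain = sequential_distribute_load(filename='my_domain', verbose=True)
#     for t in domain.evolve(yieldstep=100.0, finaltime=1000.0):
#         domain.write_time()
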
def extract_l2g_map(map):
    # Build a local-to-global (l2g) id map from a global-to-local map.
    # Entries of -1 in 'map' mark global ids that are not present locally.

    import numpy as num

    b = num.arange(len(map))

    l_ids = num.extract(map > -1, map)   # local ids of the entries present
    g_ids = num.extract(map > -1, b)     # corresponding global ids

#    print len(g_ids)
#    print len(l_ids)
#    print l_ids
#    print g_ids

    l2g = num.zeros_like(g_ids)
    l2g[l_ids] = g_ids

    return l2g
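
# A worked example (hypothetical input): if the global-to-local map is
#
#     tri_map = num.array([-1, 0, 2, -1, 1])
#
# i.e. global triangles 1, 2 and 4 are stored locally as 0, 2 and 1, then
# extract_l2g_map(tri_map) returns array([1, 4, 2]), so local id i maps back
# to global id l2g[i].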