1 | """Trying to lump parallel stuff into simpler interface |
---|
2 | |
---|
3 | |
---|
4 | """ |

# Parallelism

# The abstract Python-MPI interface
from anuga_parallel.parallel_abstraction import size, rank, get_processor_name
from anuga_parallel.parallel_abstraction import finalize, send, receive
from anuga_parallel.parallel_abstraction import pypar_available

# Mesh partitioning
from anuga_parallel.pmesh_divide import pmesh_divide_metis
from anuga_parallel.build_submesh import build_submesh
from anuga_parallel.build_local import build_local_mesh

# ANUGA parallel engine (only load if pypar is available)
if pypar_available:
    from anuga_parallel.build_commun import send_submesh
    from anuga_parallel.build_commun import rec_submesh
    from anuga_parallel.build_commun import extract_hostmesh
    from anuga_parallel.parallel_shallow_water import Parallel_Domain


#------------------------------------------------------------------------------
# Read in processor information
#------------------------------------------------------------------------------

numprocs = size()
myid = rank()
processor_name = get_processor_name()
print 'I am processor %d of %d on node %s' %(myid, numprocs, processor_name)
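# NOTE: size(), rank() and the print above run at import time, i.e. once
# on every process that imports this module.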


def distribute(domain, verbose=False):
    """Distribute the domain to all processes."""

    # FIXME: Dummy assignment (until boundaries are refactored so they
    # are independent of domains until they are applied)
    bdmap = {}
    for tag in domain.get_boundary_tags():
        bdmap[tag] = None

    domain.set_boundary(bdmap)
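    # The real boundary objects are reattached from boundary_map below,
    # once the mesh has been distributed.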

    if not pypar_available: return domain # Bypass

    # For some obscure reason this communication must happen prior to
    # the more complex mesh distribution - Oh Well!
    if myid == 0:
        domain_name = domain.get_name()
        domain_dir = domain.get_datadir()
        # FIXME - what other attributes need to be transferred?

        for p in range(1, numprocs):
            send((domain_name, domain_dir), p)
    else:
        if verbose: print 'P%d: Receiving domain attributes' %(myid)

        domain_name, domain_dir = receive(0)

    # Distribute boundary conditions
    # FIXME: This cannot handle e.g. Time_boundaries due to
    # difficulties pickling functions
    if myid == 0:
        boundary_map = domain.boundary_map
        for p in range(1, numprocs):
            send(boundary_map, p)
    else:
        if verbose: print 'P%d: Receiving boundary map' %(myid)

        boundary_map = receive(0)
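
    # boundary_map maps boundary tags to boundary objects; everything in it
    # must survive pickling to make it through send/receive above (hence
    # the FIXME about function-based boundaries).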

    if myid == 0:
        # Partition and distribute the mesh.
        # The structures returned are in the
        # correct form for the ANUGA data structure

        points, vertices, boundary, quantities, \
        ghost_recv_dict, full_send_dict = \
                distribute_mesh(domain)

        if verbose: print 'Communication done'
    else:
        # Read in the mesh partition that belongs to this
        # processor
        if verbose: print 'P%d: Receiving submeshes' %(myid)
        points, vertices, boundary, quantities, \
        ghost_recv_dict, full_send_dict = \
                rec_submesh(0)
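
    # Either way each process now holds its own submesh: points, vertices
    # and boundary define the local triangulation (including ghost
    # triangles), while ghost_recv_dict and full_send_dict encode the
    # communication pattern with neighbouring processors.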

    #------------------------------------------------------------------------
    # Build the domain for this processor using partition structures
    #------------------------------------------------------------------------
    domain = Parallel_Domain(points, vertices, boundary,
                             full_send_dict=full_send_dict,
                             ghost_recv_dict=ghost_recv_dict)
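    # (Parallel_Domain is the parallel counterpart of the sequential
    # Domain; it uses the two dictionaries to keep its ghost triangles
    # in sync with the processors that own them while evolving.)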

    #------------------------------------------------------------------------
    # Transfer initial conditions to each subdomain
    #------------------------------------------------------------------------
    for q in quantities:
        domain.set_quantity(q, quantities[q])
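    # quantities maps each quantity name (e.g. 'stage', 'elevation',
    # 'friction') to its values on the local submesh, so the initial
    # conditions are restored piece by piece.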

    #------------------------------------------------------------------------
    # Transfer boundary conditions to each subdomain
    #------------------------------------------------------------------------
    boundary_map['ghost'] = None  # Add binding to ghost boundary
    domain.set_boundary(boundary_map)
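    # Ghost triangles get their values by communication from neighbouring
    # processors rather than from a boundary condition, hence None.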

    #------------------------------------------------------------------------
    # Transfer other attributes to each subdomain
    #------------------------------------------------------------------------
    domain.set_name(domain_name)
    domain.set_datadir(domain_dir)

    #------------------------------------------------------------------------
    # Return parallel domain to all nodes
    #------------------------------------------------------------------------
    return domain


def distribute_mesh(domain):
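    """Partition the mesh (this runs on processor 0 only) and send each
    submesh to the processor that will own it; return the submesh that
    stays on processor 0.
    """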

    numprocs = size()

    # Subdivide the mesh
    print 'Subdivide mesh'
    nodes, triangles, boundary, triangles_per_proc, quantities = \
           pmesh_divide_metis(domain, numprocs)
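    # pmesh_divide_metis delegates the partitioning to Metis, which
    # balances the triangle count across processors while keeping the
    # partition interfaces (and hence later communication) small.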

    # Build the mesh that should be assigned to each processor;
    # this includes ghost nodes and the communication pattern
    print 'Build submeshes'
    submesh = build_submesh(nodes, triangles, boundary,
                            quantities, triangles_per_proc)

    # Send the mesh partition to the appropriate processor
    print 'Distribute submeshes'
    for p in range(1, numprocs):
        send_submesh(submesh, triangles_per_proc, p)

    # Build the local mesh for processor 0
    points, vertices, boundary, quantities, ghost_recv_dict, full_send_dict = \
            extract_hostmesh(submesh, triangles_per_proc)

    # Return structures necessary for building the parallel domain
    return points, vertices, boundary, quantities, \
           ghost_recv_dict, full_send_dict