1 | """Trying to lump parallel stuff into simpler interface |
---|
2 | |
---|
3 | |
---|
4 | """ |
---|
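
# A minimal usage sketch (an assumed caller, not part of this module;
# create_domain() and the quantity/boundary values are hypothetical
# placeholders).  Only processor 0's domain is actually read below, so
# the other processors may pass None:
#
#     if myid == 0:
#         domain = create_domain()                # sequential ANUGA domain
#         domain.set_quantity('elevation', ...)   # initial conditions
#     else:
#         domain = None
#
#     domain = distribute(domain)                 # now a Parallel_Domain
#     domain.set_boundary(...)                    # continue as in a serial script
#     for t in domain.evolve(yieldstep=1, finaltime=10):
#         pass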

# Parallelism
import pypar   # The Python-MPI interface
from anuga_parallel.pmesh_divide import pmesh_divide_metis
from anuga_parallel.build_submesh import build_submesh
from anuga_parallel.build_local import build_local_mesh
from anuga_parallel.build_commun import send_submesh, rec_submesh, extract_hostmesh
from anuga_parallel.parallel_shallow_water import Parallel_Domain


#------------------------------------------------------------------------------
# Read in processor information
#------------------------------------------------------------------------------

numprocs = pypar.size()
myid = pypar.rank()
processor_name = pypar.Get_processor_name()
print 'I am processor %d of %d on node %s' %(myid, numprocs, processor_name)
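
# (Assumption: scripts importing this module are started under an MPI
# launcher, e.g. 'mpirun -np 4 python some_script.py', so that numprocs
# above is the number of MPI processes.)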




def distribute(domain, verbose=False):
    """Distribute the domain to all processes.

    Processor 0 partitions the mesh and sends each submesh to the
    processor that owns it; every other processor receives its piece.
    All processors return their local Parallel_Domain.
    """

    # For some obscure reason this communication must happen prior to
    # the more complex mesh distribution - Oh Well!
    if myid == 0:
        domain_name = domain.get_name()
        for p in range(1, numprocs):
            if verbose: print 'Sending domain name to processor %d' %p
            pypar.send(domain_name, p)
    else:
        if verbose: print 'Receiving domain name'

        domain_name = pypar.receive(0)


    if myid == 0:
        # Partition and distribute the mesh.
        # The structures returned are in the
        # correct form for the ANUGA data structure.

        points, vertices, boundary, quantities, \
                ghost_recv_dict, full_send_dict = \
                distribute_mesh(domain)

        if verbose: print 'Communication done'

    else:
        # Read in the mesh partition that belongs to this
        # processor.
        points, vertices, boundary, quantities, \
                ghost_recv_dict, full_send_dict = \
                rec_submesh(0)

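    # A note on the two communication structures (inferred from the
    # names; see build_submesh and build_commun): full_send_dict maps
    # each neighbouring processor to the 'full' (owned) triangles whose
    # updated values must be sent to it, while ghost_recv_dict maps
    # each neighbour to the 'ghost' (halo) triangles whose values are
    # received from it.
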
    #------------------------------------------------------------------------
    # Build the domain for this processor using partition structures
    #------------------------------------------------------------------------
    domain = Parallel_Domain(points, vertices, boundary,
                             full_send_dict  = full_send_dict,
                             ghost_recv_dict = ghost_recv_dict)

    #------------------------------------------------------------------------
    # Transfer initial conditions to each subdomain
    #------------------------------------------------------------------------
    for q in quantities:
        domain.set_quantity(q, quantities[q])
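
    # (Here `quantities` is the dictionary built for this submesh,
    # mapping each quantity name, e.g. 'stage' or 'elevation', to the
    # vertex values of the local triangles.)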


    #------------------------------------------------------------------------
    # Transfer other attributes to each subdomain
    #------------------------------------------------------------------------

    # FIXME Do them all
    domain.set_name(domain_name)

    #------------------------------------------------------------------------
    # Return parallel domain to all nodes
    #------------------------------------------------------------------------
    return domain


def distribute_mesh(domain):
    """Partition the sequential domain with METIS and send one submesh
    to each processor; processor 0 keeps its own piece.
    """

    numprocs = pypar.size()

    # Subdivide the mesh
    print 'Subdivide mesh'
    nodes, triangles, boundary, triangles_per_proc, quantities = \
           pmesh_divide_metis(domain, numprocs)

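    # (pmesh_divide_metis hands the triangle connectivity graph to the
    # METIS partitioner, which balances the number of triangles per
    # processor while keeping the number of cut edges, and hence the
    # later communication volume, small.)
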
    # Build the mesh that should be assigned to each processor;
    # this includes ghost nodes and the communication pattern.
    print 'Build submeshes'
    submesh = build_submesh(nodes, triangles, boundary,
                            quantities, triangles_per_proc)

    # Send the mesh partition to the appropriate processor
    print 'Distribute submeshes'
    for p in range(1, numprocs):
        send_submesh(submesh, triangles_per_proc, p)

    # Build the local mesh for processor 0
    points, vertices, boundary, quantities, ghost_recv_dict, full_send_dict = \
              extract_hostmesh(submesh, triangles_per_proc)

    # Return structures necessary for building the parallel domain
    return points, vertices, boundary, quantities, \
           ghost_recv_dict, full_send_dict