#!/usr/bin/env python
#########################################################
#
# Main file for parallel mesh testing.
#
# This is a modification of the run_parallel_advection.py
# file.
#
# *) The test files currently available are of the form
#    test*.out, e.g. test_5l_4c.out. The number in front of
#    the l is the number of levels of refinement required to
#    build the grid, i.e. a higher number corresponds to a
#    finer grid. The number in front of the c is the number
#    of processors. (An illustrative parser for this naming
#    scheme is sketched just after this header.)
#
# *) The (new) files that have been added to manage the
#    grid partitioning are
#    +) pmesh_divide.py: subdivide a pmesh
#    +) build_submesh.py: build the submeshes on the host
#       processor.
#    +) build_local.py: build the GA mesh data structure
#       on each processor.
#    +) build_commun.py: handle the communication between
#       the host and the processors.
#
# *) Things still to do:
#    +) Overlap the communication and computation: the
#       communication routines in build_commun.py should be
#       interspersed in the build_submesh.py and
#       build_local.py files. This will overlap the
#       communication and computation and will be far more
#       efficient. It should be done after more testing,
#       once there is more confidence in the subpartitioning.
#    +) Much more testing, especially with large numbers of
#       processors.
#
# Authors: Linda Stals, Steve Roberts and Matthew Hardy,
#          June 2005
#
#########################################################
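# Illustrative sketch only (not called anywhere below): one way to
# pull the refinement level and processor count out of a test file
# name of the form test_<levels>l_<cpus>c.out described above. The
# helper name parse_test_filename is hypothetical and is not part of
# the partitioning tool set listed in the header.

def parse_test_filename(filename):
    """Return (levels, cpus) for a name like 'test_5l_4c.out'."""
    import re
    match = re.match(r'test_(\d+)l_(\d+)c\.out$', filename)
    if match is None:
        raise ValueError('unexpected test file name: %s' % filename)
    return int(match.group(1)), int(match.group(2))

# Example: parse_test_filename('test_5l_4c.out') == (5, 4)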
import sys
import pypar    # The Python-MPI interface
import time


from os import sep
sys.path.append('..'+sep+'pyvolution')

from Numeric import array
# pmesh

#from shallow_water import Domain

from shallow_water import Domain
from parallel_shallow_water import Parallel_Domain


# mesh partition routines
from parallel_meshes import parallel_rectangle


from pmesh_divide import pmesh_divide, pmesh_divide_steve
from build_submesh import *
from build_local import *
from build_commun import *
from pmesh2domain import pmesh_to_domain_instance

# read in the processor information

numprocs = pypar.size()
myid = pypar.rank()
processor_name = pypar.Get_processor_name()


M = 20
N = M*numprocs
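# With N = M*numprocs cells along a rectangle of length numprocs
# (see the parallel_rectangle call below), each processor ends up
# owning roughly an M x M patch of a unit square.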

#N = M = 250

if myid == 0:
    print 'N == %d' %N

points, vertices, boundary, full_send_dict, ghost_recv_dict = \
         parallel_rectangle(N, M, len1_g=1.0*numprocs, len2_g=1.0)
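
# full_send_dict and ghost_recv_dict record, for each neighbouring
# processor, which of this processor's own (full) triangles are sent
# out and which ghost (overlap) triangles are received, so the
# overlap layer can be kept up to date during the evolution.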


domain = Parallel_Domain(points, vertices, boundary,
                         full_send_dict = full_send_dict,
                         ghost_recv_dict = ghost_recv_dict)
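
# Parallel_Domain extends the sequential shallow_water Domain and
# uses the send/receive dictionaries above to refresh its ghost
# triangles from the neighbouring processors as the computation
# proceeds.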

print 'number of triangles = ', domain.number_of_elements


rect = [0.0, 0.0, 1.0*numprocs, 1.0]
try:
    domain.initialise_visualiser(rect=rect)
    domain.visualiser.coloring['stage'] = True
    domain.visualiser.scale_z['stage'] = 1.0
    domain.visualiser.scale_z['elevation'] = 0.05
except:
    print 'No visualiser'


domain.default_order = 2

# Boundaries
from parallel_shallow_water import Transmissive_boundary, Reflective_boundary

T = Transmissive_boundary(domain)
R = Reflective_boundary(domain)


domain.set_boundary( {'left': R, 'right': R, 'bottom': R, 'top': R} )
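# All four boundaries are reflective walls, so the block of raised
# water set below simply sloshes around inside a closed box.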
domain.check_integrity()

class Set_Stage:
    """Set an initial condition: water height h inside the box
    x0 < x < x1, y0 < y < y1, and zero outside.
    """

    def __init__(self, x0=0.25, x1=0.5, y0=0.0, y1=1.0, h=1.0):
        self.x0 = x0
        self.x1 = x1
        self.y0 = y0
        self.y1 = y1
        self.h = h

    def __call__(self, x, y):
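        # The comparisons yield elementwise 0/1 values, so multiplying
        # by h gives h inside the box and 0 everywhere else.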
        return self.h*((x>self.x0)&(x<self.x1)&(y>self.y0)&(y<self.y1))

domain.set_quantity('stage', Set_Stage(0.2, 0.4, 0.25, 0.75, 1.0))

if myid == 0:
    t0 = time.time()

yieldstep = 0.005
finaltime = 1.0
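
# domain.evolve advances the shallow water solution, handing control
# back every yieldstep (0.005 s of model time) until finaltime (1.0 s);
# only processor 0 reports the timestep statistics.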

# Check that the boundary value gets propagated to all elements
for t in domain.evolve(yieldstep = yieldstep, finaltime = finaltime):
    if myid == 0:
        domain.write_time()

if myid == 0:
    print 'That took %.2f seconds' %(time.time()-t0)
    print 'Communication time %.2f seconds' %domain.communication_time
    print 'Reduction communication time %.2f seconds' %domain.communication_reduce_time
    print 'Broadcast time %.2f seconds' %domain.communication_broadcast_time


pypar.finalize()
---|