source: anuga_core/source/anuga_parallel/run_parallel_sw_merimbula_test.py @ 3593

Last change on this file since 3593 was 3579, checked in by ole, 18 years ago

Removed all references to pyvolution in parallel code

#!/usr/bin/env python
#########################################################
#
#  Main file for parallel mesh testing.
#
#  This is a modification of the run_parallel_advection.py
# file.
#
#
# *) The (new) files that have been added to manage the
# grid partitioning are
#    +) pmesh_divide_metis.py: subdivide a pmesh
#    +) build_submesh.py: build the submeshes on the host
# processor.
#    +) build_local.py: build the GA mesh datastructure
# on each processor.
#    +) build_commun.py: handle the communication between
# the host and processors
#
# *) Things still to do:
#    +) Overlap the communication and computation: The
# communication routines in build_commun.py should be
# interspersed in the build_submesh.py and build_local.py
# files. This will overlap the communication and
# computation and will be far more efficient. This should
# be done after more testing and once there is more
# confidence in the subpartitioning.
#    +) Much more testing, especially with large numbers of
# processors.
#  Authors: Linda Stals, Steve Roberts and Matthew Hardy,
# June 2005
#
#
#
#########################################################
import sys
import pypar    # The Python-MPI interface
import time

# Numeric arrays
from Numeric import array, zeros, Float

# Print debugging information
from print_stats import print_test_stats, build_full_flag

# pmesh
from anuga.shallow_water import Domain
from parallel_shallow_water import Parallel_Domain
from anuga.abstract_2d_finite_volumes.pmesh2domain\
     import pmesh_to_domain_instance

# Reuse previous mesh import
from anuga.caching import cache

# Mesh partition routines
from pmesh_divide  import pmesh_divide_metis
from build_submesh import build_submesh
from build_local   import build_local_mesh
from build_commun  import send_submesh, rec_submesh, extract_hostmesh


###############################
# Read in processor information
###############################

numprocs = pypar.size()
myid = pypar.rank()
processor_name = pypar.Get_processor_name()
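# numprocs is the total number of MPI processes, myid is this process'
# rank (0 .. numprocs-1) and processor_name is the host it is running on.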

############################
# Set the initial conditions
############################

rect = zeros( 4, Float) # Buffer for the full domain extent (broadcast below)

class Set_Stage:
    """Set an initial condition with constant water height h, for x0 < x < x1
    """

    def __init__(self, x0=0.25, x1=0.5, h=1.0):
        self.x0 = x0
        self.x1 = x1
        self.h = h

    def __call__(self, x, y):
        return self.h*((x>self.x0)&(x<self.x1))

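# Note: the comparisons in __call__ evaluate to 0/1 masks, so Set_Stage(a, b, h)
# returns h where a < x < b and 0.0 elsewhere.
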
#######################
# Partition the domain
#######################

if myid == 0:

    # Read in the test file

    filename = 'test-100.tsh'
#    filename = 'merimbula_10785_1.tsh'

    # Build the whole domain

    domain_full = pmesh_to_domain_instance(filename, Domain)

#    domain_full = cache(pmesh_to_domain_instance,
#               (filename, Domain),
#              dependencies = [filename])

    rect = array(domain_full.xy_extent, Float)

    # Initialise the wave

    domain_full.set_quantity('stage', Set_Stage(200.0,300.0,1.0))
#    domain_full.set_quantity('stage', Set_Stage(756000.0,756500.0,2.0))
#    domain_full.set_quantity('stage', Set_Stage(756000.0,756500.0,0.0))

    # Subdivide the domain

    # Note the different arguments compared with pmesh_divide,
    # pmesh_divide_steve etc.

    nodes, triangles, boundary, triangles_per_proc, quantities = \
         pmesh_divide_metis(domain_full, numprocs)
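
    # triangles_per_proc[p] is the number of triangles that the METIS
    # partitioning assigned to processor p (printed below as a sanity check).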

    print triangles_per_proc

    rect = array(domain_full.xy_extent, Float)

    submesh = build_submesh(nodes, triangles, boundary,\
                            quantities, triangles_per_proc)

    # Send each mesh partition to the appropriate processor

    for p in range(1, numprocs):
        send_submesh(submesh, triangles_per_proc, p)

    # Build the local mesh for processor 0

    points, vertices, boundary, quantities, ghost_recv_dict, full_send_dict = \
             extract_hostmesh(submesh, triangles_per_proc)
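
    # points/vertices/boundary and quantities describe processor 0's local
    # mesh (including ghost triangles); ghost_recv_dict and full_send_dict
    # record which triangles are exchanged with each neighbouring processor
    # during the parallel update.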

# Read in the mesh partition that belongs to this
# processor (note that the information is in the
# correct form for the GA data structure)

else:
    points, vertices, boundary, quantities, ghost_recv_dict, full_send_dict \
            = rec_submesh(0)


#############################################
# Start the computations on each subpartition
#############################################

#if myid == 0:
#    print 'ghost'
#    print ghost_recv_dict
#processor_name
#if myid == 0:
#    print 'full'
#    print full_send_dict

# The visualiser needs to know the size of the whole domain

pypar.broadcast(rect,0)
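# (rect was filled in on processor 0; the broadcast copies it, in place, into
# the pre-allocated buffer on every other processor.)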

domain = Parallel_Domain(points, vertices, boundary,
                         full_send_dict  = full_send_dict,
                         ghost_recv_dict = ghost_recv_dict)

# Make a note of which triangles are full and which are ghost

tri_full_flag = build_full_flag(domain, ghost_recv_dict)
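# tri_full_flag has one entry per local triangle: 1 for triangles owned by
# this processor, 0 for ghost triangles whose values come from neighbours
# (used, e.g., by print_test_stats so ghosts are not counted twice).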

try:
    #domain.initialise_visualiser(rect=rect)
    #domain.visualiser.coloring['stage'] = True
    #domain.visualiser.scale_z['stage'] = 0.2
    #domain.visualiser.scale_z['elevation'] = 0.05
    pass
except:
    print 'No visualiser'



domain.default_order = 1

# Boundaries
from parallel_shallow_water import Transmissive_boundary, Reflective_boundary

T = Transmissive_boundary(domain)
R = Reflective_boundary(domain)
domain.set_boundary( {'outflow': R, 'inflow': R, 'inner': R, 'exterior': R, 'open': R, 'ghost': None} )
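# Every named boundary tag gets a reflective condition here; the 'ghost' tag
# is mapped to None because ghost triangles take their values from the
# neighbouring processor rather than from a boundary condition.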


domain.set_quantity('stage', quantities['stage'])
domain.set_quantity('elevation', quantities['elevation'])

domain.store = False
#domain.filename = 'merimbula-%d' %domain.processor

#----------
# Evolution
t0 = time.time()

print 'Processor %d on %s: No of elements %d'%(domain.processor,processor_name,domain.number_of_elements)
yieldstep = 0.05
finaltime = 5.0

yieldstep = 1
finaltime = None
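# The assignments above override yieldstep = 0.05 / finaltime = 5.0; only this
# last pair (yieldstep = 1, finaltime = None) is seen by the evolve loop below.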

#yieldstep = 1
#finaltime = 1
#processor_name
#for t in domain.evolve(yieldstep = yieldstep, finaltime = finaltime):
#    if myid == 0:
#        domain.write_time()
        #print 'Processor %d, Integral of stage %d'%\
        #       (domain.processor,domain.quantities['stage'].get_integral())
        #    print_test_stats(domain, tri_full_flag)


# Profiling
#import profile
#profiler = profile.Profile()
#result.dump_stats("profile." + str(numprocs) + "." + str(myid) + ".dat")

## #New hotshot profiling
## import hotshot
## profiler = hotshot.Profile("hotshot." + str(numprocs) + "." + str(myid) + ".prof")
## s = '''for t in domain.evolve(yieldstep = yieldstep, finaltime = finaltime):
##   if myid == 0:
##     domain.write_time()
##   print_test_stats(domain, tri_full_flag)

## '''
## result = profiler.runctx(s, globals(), locals())
## profiler.close()

#from vtk_realtime_visualiser import Visualiser
#V = Visualiser(domain,default_scale_z=100.0)
#V.coloring['stage'] = True
#V.coloring['elevation'] = False
#V.setup['elevation']=True
#V.updating['stage']=True
#V.qcolor['stage'] = (0.1,0.4,0.99)


#V.start()
#V.idle.wait()
#V.idle.clear()



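# Evolve the flow. domain.evolve is a generator: each iteration advances the
# parallel solution by (up to) yieldstep seconds of model time, and only
# processor 0 writes the timestepping statistics.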
for t in domain.evolve(yieldstep = yieldstep, finaltime = finaltime):
    if myid == 0:
        domain.write_time()
        #print_test_stats(domain, tri_full_flag)

#    V.redraw_ready.set()
#    V.idle.wait()
#    V.idle.clear()
#    V.unpaused.wait()


#print 'P%d: That took %.2f seconds' %(myid, time.time()-t0)
#print 'P%d: Communication time %.2f seconds' %(myid, domain.communication_time)
#print 'P%d: Reduction Communication time %.2f seconds' %(myid, domain.communication_reduce_time)
#print 'P%d: Broadcast time %.2f seconds' %(myid, domain.communication_broadcast_time)



if myid == 0:
    print 'That took %.2f seconds' %(time.time()-t0)
    print 'Communication time %.2f seconds'%domain.communication_time
    print 'Reduction Communication time %.2f seconds'%domain.communication_reduce_time
    print 'Broadcast time %.2f seconds'%domain.communication_broadcast_time