source: trunk/anuga_core/source/anuga_parallel/run_parallel_sw_merimbula_test.py @ 8022

Last change on this file since 8022 was 7615, checked in by steve, 14 years ago

Added larger merimbula mesh

File size: 7.7 KB
#!/usr/bin/env python
#########################################################
#
#  Main file for parallel mesh testing.
#
#  This is a modification of the run_parallel_advection.py
#  file.
#
#
# *) The (new) files that have been added to manage the
#    grid partitioning are:
#    +) pmesh_divide_metis.py: subdivide a pmesh
#    +) build_submesh.py: build the submeshes on the host
#       processor.
#    +) build_local.py: build the GA mesh data structure
#       on each processor.
#    +) build_commun.py: handle the communication between
#       the host and the other processors.
#
# *) Things still to do:
#    +) Overlap the communication and computation: the
#       communication routines in build_commun.py should be
#       interspersed with the build_submesh.py and build_local.py
#       files. This will overlap the communication and
#       computation and will be far more efficient. This should
#       be done after more testing, when there is more confidence
#       in the subpartitioning.
#    +) Much more testing, especially with large numbers of
#       processors.
#
#  Authors: Linda Stals, Steve Roberts and Matthew Hardy,
#  June 2005
#
#
#########################################################
import sys
import pypar    # The Python-MPI interface
import time

# Numeric arrays
import numpy as num
#from numpy import array, zeros, float

# Print debugging information
from print_stats import print_test_stats, build_full_flag

# pmesh
from anuga.shallow_water import Domain
from parallel_shallow_water import Parallel_domain
from anuga.abstract_2d_finite_volumes.pmesh2domain\
     import pmesh_to_domain_instance

# Reuse previous mesh import
from anuga.caching import cache

# Mesh partition routines
from distribute_mesh import pmesh_divide_metis
from distribute_mesh import build_submesh
from distribute_mesh import build_local_mesh
from distribute_mesh import send_submesh, rec_submesh, extract_hostmesh

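# This script runs under MPI via pypar; a typical launch looks something like
#     mpirun -np 4 python run_parallel_sw_merimbula_test.py
# (the exact command depends on the local MPI installation).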

###############################
# Read in processor information
###############################

numprocs = pypar.size()
myid = pypar.rank()
processor_name = pypar.get_processor_name()

############################
# Set the initial conditions
############################

rect = num.zeros( 4, num.float) # Buffer for results

class Set_Stage:
    """Set an initial condition with constant water height, for x0 < x < x1
    """

    def __init__(self, x0=0.25, x1=0.5, h=1.0):
        self.x0 = x0
        self.x1 = x1
        self.h = h

    def __call__(self, x, y):
        return self.h*((x>self.x0)&(x<self.x1))
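# Note: (x>x0)&(x<x1) is a boolean (0/1) mask, so __call__ returns h inside
# the band x0 < x < x1 and 0.0 outside it.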
88
#######################
# Partition the domain
#######################

if myid == 0:

    # Read in the test files

#    filename = 'test-100.tsh'
#    filename = 'merimbula_10785_1.tsh'
    filename = 'merimbula_43200.tsh'

    # Build the whole domain

    domain_full = pmesh_to_domain_instance(filename, Domain)

#    domain_full = cache(pmesh_to_domain_instance,
#               (filename, Domain),
#              dependencies = [filename])

    rect = num.array(domain_full.get_extent(), num.float)
    print rect

    # Initialise the wave

    #domain_full.set_quantity('stage', Set_Stage(200.0,300.0,1.0))
    domain_full.set_quantity('stage', Set_Stage(756000.0,756500.0,2.0))
#    domain_full.set_quantity('stage', Set_Stage(756000.0,756500.0,0.0))

    # Subdivide the domain

    # Note the different arguments compared with pmesh_divide,
    # pmesh_divide_steve etc.

    nodes, triangles, boundary, triangles_per_proc, quantities = \
         pmesh_divide_metis(domain_full, numprocs)

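    # pmesh_divide_metis reorders the triangles so that each processor's
    # share is contiguous; triangles_per_proc[p] is the number of triangles
    # assigned to processor p.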
    print triangles_per_proc

    rect = num.array(domain_full.get_extent(), num.float)

    submesh = build_submesh(nodes, triangles, boundary,\
                            quantities, triangles_per_proc)

    # Send the mesh partition to the appropriate processor

    for p in range(1, numprocs):
      send_submesh(submesh, triangles_per_proc, p)

    # Build the local mesh for processor 0

    points, vertices, boundary, quantities, ghost_recv_dict, full_send_dict = \
             extract_hostmesh(submesh, triangles_per_proc)

# Read in the mesh partition that belongs to this
# processor (note that the information is in the
# correct form for the GA data structure)

else:
    points, vertices, boundary, quantities, ghost_recv_dict, full_send_dict, \
            no_full_nodes, no_full_trigs = rec_submesh(0)


###########################################
# Start the computations on each subpartition
###########################################

#if myid == 0:
#    print 'ghost'
#    print ghost_recv_dict
#processor_name
#if myid == 0:
#    print 'full'
#    print full_send_dict

# The visualiser needs to know the size of the whole domain

pypar.broadcast(rect,0)

domain = Parallel_domain(points, vertices, boundary,
                         full_send_dict = full_send_dict,
                         ghost_recv_dict = ghost_recv_dict)
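# full_send_dict and ghost_recv_dict describe the communication pattern:
# for each neighbouring processor, which of this processor's full triangles
# are sent to it and which local ghost triangles receive their values from it.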

# Make a note of which triangles are full and which are ghost

tri_full_flag = build_full_flag(domain, ghost_recv_dict)

try:
    #domain.initialise_visualiser(rect=rect)
    #domain.visualiser.coloring['stage'] = True
    #domain.visualiser.scale_z['stage'] = 0.2
    #domain.visualiser.scale_z['elevation'] = 0.05
    pass
except:
    print 'No visualiser'



domain.default_order = 1
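# default_order = 1 selects first-order spatial reconstruction (robust, but
# more diffusive than the second-order scheme).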

# Boundaries
from anuga.interface import Transmissive_boundary, Reflective_boundary

T = Transmissive_boundary(domain)
R = Reflective_boundary(domain)
domain.set_boundary({'outflow': R, 'inflow': R, 'inner': R, 'exterior': R, 'open': R, 'ghost': None})
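# The 'ghost' tag is mapped to None: ghost cells obtain their values from
# neighbouring processors during communication, so no physical boundary
# condition is applied to them.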


domain.set_quantity('stage', quantities['stage'])
domain.set_quantity('elevation', quantities['elevation'])

domain.store = False

#---------
# Evolution
t0 = time.time()

print 'Processor %d on %s: No of elements %d'%(domain.processor,processor_name,domain.number_of_elements)
yieldstep = 50.0
finaltime = 500.0
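# evolve() (below) yields control back to the script every yieldstep seconds
# of simulated time, until finaltime seconds have been simulated.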

#yieldstep = 1000
#finaltime = 40000

#yieldstep = 1
#finaltime = 1
#processor_name
#for t in domain.evolve(yieldstep = yieldstep, finaltime = finaltime):
#    if myid == 0:
#        domain.write_time()
        #print 'Processor %d, Integral of stage %d'%\
        #       (domain.processor,domain.quantities['stage'].get_integral())
        #    print_test_stats(domain, tri_full_flag)


# Profiling
#import profile
#profiler = profile.Profile()
#result.dump_stats("profile." + str(numprocs) + "." + str(myid) + ".dat")

## #New hotshot profiling
## import hotshot
## profiler = hotshot.Profile("hotshot." + str(numprocs) + "." + str(myid) + ".prof")
## s = '''for t in domain.evolve(yieldstep = yieldstep, finaltime = finaltime):
##   if myid == 0:
##     domain.write_time()
##   print_test_stats(domain, tri_full_flag)

## '''
## result = profiler.runctx(s, globals(), locals())
## profiler.close()

#from vtk_realtime_visualiser import Visualiser
#V = Visualiser(domain,default_scale_z=100.0)
#V.coloring['stage'] = True
#V.coloring['elevation'] = False
#V.setup['elevation']=True
#V.updating['stage']=True
#V.qcolor['stage'] = (0.1,0.4,0.99)


#V.start()
#V.idle.wait()
#V.idle.clear()




for t in domain.evolve(yieldstep = yieldstep, finaltime = finaltime):
    if myid == 0:
        domain.write_time()
        #print_test_stats(domain, tri_full_flag)

#    V.redraw_ready.set()
#    V.idle.wait()
#    V.idle.clear()
#    V.unpaused.wait()


#print 'P%d: That took %.2f seconds' %(myid, time.time()-t0)
#print 'P%d: Communication time %.2f seconds' %(myid, domain.communication_time)
#print 'P%d: Reduction Communication time %.2f seconds' %(myid, domain.communication_reduce_time)
#print 'P%d: Broadcast time %.2f seconds' %(myid, domain.communication_broadcast_time)



if myid == 0:
    print 'That took %.2f seconds' %(time.time()-t0)
    print 'Communication time %.2f seconds'%domain.communication_time
    print 'Reduction Communication time %.2f seconds'%domain.communication_reduce_time
    print 'Broadcast time %.2f seconds'%domain.communication_broadcast_time


pypar.finalize()