source: inundation/parallel/run_parallel_sw_merimbula.py @ 2378

Last change on this file since 2378 was 2152, checked in by linda, 19 years ago

Added caching to the sw_merimbula files. Also updated some of the comments

File size: 6.7 KB
#!/usr/bin/env python
#########################################################
#
#  Main file for parallel mesh testing.
#
#  This is a modification of the run_parallel_advection.py
# file.
#
#
# *) The (new) files that have been added to manage the
# grid partitioning are
#    +) pmesh_divide.py: subdivide a pmesh
#    +) build_submesh.py: build the submeshes on the host
# processor.
#    +) build_local.py: build the GA mesh datastructure
# on each processor.
#    +) build_commun.py: handle the communication between
# the host and the processors
#
# *) Things still to do:
#    +) Overlap the communication and computation: The
# communication routines in build_commun.py should be
# interspersed in the build_submesh.py and build_local.py
# files. This will overlap the communication and
# computation and will be far more efficient, but it should
# be done after more testing, when there is more confidence
# in the subpartitioning.
#    +) Much more testing, especially with large numbers of
# processors.
#
#  Authors: Linda Stals, Steve Roberts and Matthew Hardy,
# June 2005
#
#
#########################################################

import sys
import pypar    # The Python-MPI interface
import time

from os import sep
sys.path.append('..'+sep+'pyvolution')

# Numeric arrays

from Numeric import array, zeros, Float

# pmesh

from shallow_water import Domain
from parallel_shallow_water import Parallel_Domain
from pmesh2domain import pmesh_to_domain_instance

# Reuse previous mesh import

from caching import cache

# Mesh partition routines

from pmesh_divide import pmesh_divide, pmesh_divide_steve
from build_submesh import build_submesh, extract_hostmesh
from build_local   import build_local_mesh
from build_commun  import send_submesh, rec_submesh


###############################
# Read in processor information
###############################

numprocs = pypar.size()
myid = pypar.rank()
processor_name = pypar.Get_processor_name()

############################
# Set the initial conditions
############################

rect = zeros( 4, Float) # Buffer for results

class Set_Stage:
    """Set an initial condition with constant water height h, for x0 < x < x1
    """

    def __init__(self, x0=0.25, x1=0.5, h=1.0):
        self.x0 = x0
        self.x1 = x1
        self.h  = h

    def __call__(self, x, y):
        return self.h*((x>self.x0)&(x<self.x1))
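
# Example: Set_Stage(756000.0, 756500.0, 2.0), as used below, returns a
# callable that evaluates to 2.0 where 756000 < x < 756500 and 0.0 elsewhere.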

#######################
# Partition the domain
#######################

if myid == 0:

    # Read in the test files

#    filename = 'test-100.tsh'
    filename = 'merimbula_10785_1.tsh'
    nx = numprocs
    ny = 1
    if nx*ny != numprocs:
        print "WARNING: number of subboxes is not equal to the number of processors"
    # Build the whole domain

#    domain_full = pmesh_to_domain_instance(filename, Domain)

    domain_full = cache(pmesh_to_domain_instance,
                        (filename, Domain),
                        dependencies = [filename])
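    # The cached call should rebuild the domain only when the dependency
    # (the mesh file named by filename) changes; otherwise the previously
    # computed domain is loaded from the cache.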

    rect = array(domain_full.xy_extent, Float)

    # Initialise the wave

#    domain_full.set_quantity('stage', Set_Stage(200.0,300.0,1.0))
    domain_full.set_quantity('stage', Set_Stage(756000.0,756500.0,2.0))

    # Subdivide the domain

    nodes, triangles, boundary, triangles_per_proc, quantities = \
         pmesh_divide_steve(domain_full, nx, ny)

    submesh = build_submesh(nodes, triangles, boundary,\
                            quantities, triangles_per_proc)
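    # triangles_per_proc is expected to give the number of triangles assigned
    # to each processor, and submesh is assumed to collect the nodes,
    # triangles, boundary and quantities of every subdomain so that each
    # partition can be sent to its processor below.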

    # Send the mesh partition to the appropriate processor

    for p in range(1, numprocs):
        send_submesh(submesh, triangles_per_proc, p)

    # Build the local mesh for processor 0

    hostmesh = extract_hostmesh(submesh)
    points, vertices, boundary, quantities, ghost_recv_dict, \
            full_send_dict = build_local_mesh(hostmesh, 0, \
                                              triangles_per_proc[0], \
                                              numprocs)
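    # ghost_recv_dict and full_send_dict are assumed to describe the ghost
    # communication pattern (which triangles are exchanged with each
    # neighbouring processor); both are handed to Parallel_Domain below.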

# Read in the mesh partition that belongs to this
# processor (note that the information is in the
# correct form for the GA data structure)

else:
    points, vertices, boundary, quantities, ghost_recv_dict, \
            full_send_dict = rec_submesh(0)
151
[2152]152###########################################
153# Start the computations on each subpartion
154###########################################
155
[2090]156##    ########### start profile testing
157##    import profile, pstats
158   
159##    # Create a profiler instance.
160##    pobject = profile.Profile()
161   
162   
163##    # Profile the test function and print the statistics.
164##    s = 'points, vertices, boundary, quantities, ghost_recv_dict, full_send_dict \
165##            = rec_submesh(0)'
166
167##    presult = pobject.runctx(s,globals(),locals())
168##    presult.dump_stats("/tmp/profile.dat")
169   
170##    stats = pstats.Stats("/tmp/profile.dat")
171##    print stats.sort_stats('time').print_stats(40)
172
173######## end profile testing


# The visualiser needs to know the size of the whole domain

pypar.broadcast(rect,0)
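# (rect was filled with domain_full.xy_extent on processor 0 and is still a
# zero buffer on the other processors, so the broadcast gives every processor
# the extent of the full domain.)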


domain = Parallel_Domain(points, vertices, boundary,
                         full_send_dict  = full_send_dict,
                         ghost_recv_dict = ghost_recv_dict)

try:
    domain.initialise_visualiser(rect=rect)
    #domain.visualiser.coloring['stage'] = True
    domain.visualiser.scale_z['stage'] = 0.2
    domain.visualiser.scale_z['elevation'] = 0.05
except:
    print 'No visualiser'


domain.default_order = 1

# Boundaries
from parallel_shallow_water import Transmissive_boundary, Reflective_boundary

T = Transmissive_boundary(domain)
R = Reflective_boundary(domain)
domain.set_boundary( {'outflow': R, 'inflow': R, 'inner':R, 'exterior': R, 'open':R} )
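# Note that although a transmissive boundary T is created above, every
# boundary tag is currently mapped to the reflective boundary R.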


domain.set_quantity('stage', quantities['stage'])
domain.set_quantity('elevation', quantities['elevation'])

#domain.store = True
#domain.filename = 'merimbula-%d' %domain.processor

#---------
# Evolution
t0 = time.time()

print 'Processor %d on %s: No of elements %d'%(domain.processor,processor_name,domain.number_of_elements)
yieldstep = 0.05
finaltime = 500.0

yieldstep = 1
finaltime = 30
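# (The shorter yieldstep and finaltime above override the earlier settings,
# presumably for a quick test run.)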
#processor_name
for t in domain.evolve(yieldstep = yieldstep, finaltime = finaltime):
    if myid == 0:
        domain.write_time()
        #print 'Processor %d, Integral of stage %d'%\
        #       (domain.processor,domain.quantities['stage'].get_integral())


#print 'P%d: That took %.2f seconds' %(myid, time.time()-t0)
#print 'P%d: Communication time %.2f seconds' %(myid, domain.communication_time)
#print 'P%d: Reduction Communication time %.2f seconds' %(myid, domain.communication_reduce_time)
#print 'P%d: Broadcast time %.2f seconds' %(myid, domain.communication_broadcast_time)



if myid == 0:
    print 'That took %.2f seconds' %(time.time()-t0)
    print 'Communication time %.2f seconds'%domain.communication_time
    print 'Reduction Communication time %.2f seconds'%domain.communication_reduce_time
    print 'Broadcast time %.2f seconds'%domain.communication_broadcast_time