source: inundation/parallel/run_parallel_sw_merimbula.py @ 2152

Last change on this file since 2152 was 2152, checked in by linda, 18 years ago

Added caching to the sw_merimbula files. Also updated some of the comments

File size: 6.7 KB
Line 
1#!/usr/bin/env python
2#########################################################
3#
4#  Main file for parallel mesh testing.
5#
6#  This is a modification of the run_parallel_advection.py
7# file.
8#
9#
10# *) The (new) files that have been added to manage the
11# grid partitioning are
12#    +) pmesh_divide.py: subdivide a pmesh
13#    +) build_submesh.py: build the submeshes on the host
14# processor.
15#    +) build_local.py: build the GA mesh datastructure
16# on each processor.
17#    +) build_commun.py: handle the communication between
18# the host and processors
19#
20# *) Things still to do:
21#    +) Overlap the communication and computation: The
22# communication routines in build_commun.py should be
23# interdispersed in the build_submesh.py and build_local.py
24# files. This will overlap the communication and
25# computation and will be far more efficient. This should
26# be done after more testing and there is more confidence in
27# the subpartitioning.
28#    +) Much more testing especially with large numbers of
29# processors.
30#  Authors: Linda Stals, Steve Roberts and Matthew Hardy,
31# June 2005
32#
33#
34#
35#########################################################
36
37import sys
38import pypar    # The Python-MPI interface
39import time
40
41from os import sep
42sys.path.append('..'+sep+'pyvolution')
43
44# Numeric arrays
45
46from Numeric import array, zeros, Float
47
48# pmesh
49
50from shallow_water import Domain
51from parallel_shallow_water import Parallel_Domain
52from pmesh2domain import pmesh_to_domain_instance
53
54# Reuse previous mesh import
55
56from caching import cache
57
58# Mesh partition routines
59
60from pmesh_divide import pmesh_divide, pmesh_divide_steve
61from build_submesh import build_submesh, extract_hostmesh
62from build_local   import build_local_mesh
63from build_commun  import send_submesh, rec_submesh
64
65
###############################
# Read in processor information
###############################

numprocs = pypar.size()                      # total number of MPI processes
myid = pypar.rank()                          # rank of this process (0 is the host)
processor_name = pypar.Get_processor_name()  # hostname, for diagnostics only

############################
# Set the initial conditions
############################

# Broadcast buffer for the full-domain extent; filled in by processor 0
# and sent to everyone so the visualiser knows the whole domain's size.
rect = zeros(4, Float)
79
class Set_Stage:
    """Set an initial condition with constant water height, for x0 < x < x1.

    Instances are callables suitable for Domain.set_quantity: stage is h
    inside the band x0 < x < x1 and 0 elsewhere (y is ignored).
    """

    def __init__(self, x0=0.25, x1=0.5, h=1.0):
        self.x0 = x0
        self.x1 = x1
        self.h = h   # fixed: original line was garbled ('self.= h')

    def __call__(self, x, y):
        # (x>x0)&(x<x1) is 1 inside the band and 0 outside; works
        # elementwise for Numeric arrays and for plain scalars alike.
        return self.h * ((x > self.x0) & (x < self.x1))
91
#######################
# Partition the domain
#######################

if myid == 0:

    # Processor 0 reads the mesh, builds the full domain, subdivides it
    # and ships one submesh to every other processor.

#    filename = 'test-100.tsh'
    filename = 'merimbula_10785_1.tsh'
    nx = numprocs
    ny = 1
    if nx*ny != numprocs:
        print("WARNING: number of subboxes is not equal to the number of proc")

    # Build the whole domain; cached on the mesh file so repeated runs
    # skip the expensive parse step.

#    domain_full = pmesh_to_domain_instance(filename, Domain)

    domain_full = cache(pmesh_to_domain_instance,
                        (filename, Domain),
                        dependencies=[filename])

    # Record the full-domain extent for the broadcast below.
    rect = array(domain_full.xy_extent, Float)

    # Initialise the wave

#    domain_full.set_quantity('stage', Set_Stage(200.0,300.0,1.0))
    domain_full.set_quantity('stage', Set_Stage(756000.0,756500.0,2.0))

    # Subdivide the domain

    nodes, triangles, boundary, triangles_per_proc, quantities = \
        pmesh_divide_steve(domain_full, nx, ny)

    submesh = build_submesh(nodes, triangles, boundary,
                            quantities, triangles_per_proc)

    # Send each remote processor its mesh partition

    for p in range(1, numprocs):
        send_submesh(submesh, triangles_per_proc, p)

    # Build the local mesh for processor 0

    hostmesh = extract_hostmesh(submesh)
    points, vertices, boundary, quantities, ghost_recv_dict, \
        full_send_dict = build_local_mesh(hostmesh, 0,
                                          triangles_per_proc[0],
                                          numprocs)

# Read in the mesh partition that belongs to this processor (note that
# the information arrives already in the correct form for the GA data
# structure).

else:
    points, vertices, boundary, quantities, ghost_recv_dict, \
        full_send_dict = rec_submesh(0)
150
151
############################################
# Start the computations on each subpartition
############################################

# The visualiser needs to know the size of the whole domain

pypar.broadcast(rect, 0)


# Build the parallel domain from the local mesh partition; the send/recv
# dictionaries describe the ghost-cell communication pattern.
domain = Parallel_Domain(points, vertices, boundary,
                         full_send_dict  = full_send_dict,
                         ghost_recv_dict = ghost_recv_dict)

# The visualiser is optional: fall back to a text-only run when it is
# unavailable.  Narrowed from a bare 'except:' so that KeyboardInterrupt
# and SystemExit are no longer silently swallowed.
try:
    domain.initialise_visualiser(rect=rect)
    #domain.visualiser.coloring['stage'] = True
    domain.visualiser.scale_z['stage'] = 0.2
    domain.visualiser.scale_z['elevation'] = 0.05
except Exception:
    print('No visualiser')
192
193
domain.default_order = 1

# Boundaries: every tag is reflective; a transmissive boundary is kept
# available as T but not currently applied.
from parallel_shallow_water import Transmissive_boundary, Reflective_boundary

T = Transmissive_boundary(domain)
R = Reflective_boundary(domain)
boundary_map = {}
for tag in ('outflow', 'inflow', 'inner', 'exterior', 'open'):
    boundary_map[tag] = R
domain.set_boundary(boundary_map)


# Transfer the initial quantities from the partitioning step.
domain.set_quantity('stage', quantities['stage'])
domain.set_quantity('elevation', quantities['elevation'])

#domain.store = True
#domain.filename = 'merimbula-%d' %domain.processor
209
#----------
# Evolution
#----------

t0 = time.time()

print('Processor %d on %s: No of elements %d'
      % (domain.processor, processor_name, domain.number_of_elements))

# Short test run.  (Removed the dead assignments yieldstep = 0.05 /
# finaltime = 500.0 that were immediately overwritten; those are the
# long-run production settings.)
yieldstep = 1
finaltime = 30

# Evolve the shallow-water equations; only the host prints progress.
for t in domain.evolve(yieldstep = yieldstep, finaltime = finaltime):
    if myid == 0:
        domain.write_time()


# Timing summary, printed on the host only.
if myid == 0:
    print('That took %.2f seconds' % (time.time()-t0))
    print('Communication time %.2f seconds' % domain.communication_time)
    print('Reduction Communication time %.2f seconds' % domain.communication_reduce_time)
    print('Broadcast time %.2f seconds' % domain.communication_broadcast_time)
Note: See TracBrowser for help on using the repository browser.