source: inundation/parallel/run_parallel_sw_merimbula_metis.py @ 2198

Last change on this file since 2198 was 2198, checked in by jack, 18 years ago

Update visualisation calls in the rectangle and merimbula (metis) cases.
Reset final time on merimbula (metis) for running on the LC.

  • Property svn:executable set to *
File size: 6.9 KB
#!/usr/bin/env python
###
# Same as run_parallel_sw_merimbula.py, but uses pmesh_divide_metis
# to partition the mesh.
#########################################################
#
#  Main file for parallel mesh testing.
#
#  This is a modification of the run_parallel_advection.py
# file.
#
#
# *) The (new) files that have been added to manage the
# grid partitioning are
#    +) pmesh_divide.py: subdivide a pmesh
#    +) build_submesh.py: build the submeshes on the host
# processor.
#    +) build_local.py: build the GA mesh data structure
# on each processor.
#    +) build_commun.py: handle the communication between
# the host and the other processors.
#
# *) Things still to do:
#    +) Overlap the communication and computation: the
# communication routines in build_commun.py should be
# interspersed with the build_submesh.py and build_local.py
# code. This will overlap the communication and computation
# and will be far more efficient, but it should be done only
# after more testing and once there is more confidence in
# the subpartitioning.
#    +) Much more testing, especially with large numbers of
# processors.
#
#  Authors: Linda Stals, Steve Roberts and Matthew Hardy,
# June 2005
#
#
#
#########################################################
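#
# Overall structure of this script:
#   1. Processor 0 reads the full Merimbula mesh, sets the initial
#      stage and partitions the mesh with pmesh_divide_metis.
#   2. Processor 0 builds the submeshes, sends one to each of the
#      other processors (send_submesh) and keeps its own piece
#      (extract_hostmesh / build_local_mesh); the other processors
#      receive theirs with rec_submesh.
#   3. Every processor builds a Parallel_Domain, sets boundaries and
#      quantities, and evolves it, with the evolve loop wrapped in a
#      hotshot profiler.
#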
import sys
import pypar    # The Python-MPI interface
import time


from os import sep
sys.path.append('..'+sep+'pyvolution')

# Numeric arrays

from Numeric import array, zeros, Float

# pmesh

from shallow_water import Domain
from parallel_shallow_water import Parallel_Domain
from pmesh2domain import pmesh_to_domain_instance

# Reuse previous mesh import

from caching import cache

# Mesh partition routines

from pmesh_divide import pmesh_divide_metis
from build_submesh import build_submesh, extract_hostmesh
from build_local   import build_local_mesh
from build_commun  import send_submesh, rec_submesh

###############################
# Read in processor information
###############################

numprocs = pypar.size()
myid = pypar.rank()
processor_name = pypar.Get_processor_name()
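
# pypar wraps MPI: numprocs is the total number of MPI processes and myid is
# this process's rank; rank 0 acts as the host processor below.
# Illustrative sanity check (sketch only, left commented out):
#print 'I am processor %d of %d on node %s' % (myid, numprocs, processor_name)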

############################
# Set the initial conditions
############################

rect = zeros(4, Float)    # Buffer for the full domain extent, broadcast below

class Set_Stage:
    """Set an initial condition with constant water height h for x0 < x < x1
    """

    def __init__(self, x0=0.25, x1=0.5, h=1.0):
        self.x0 = x0
        self.x1 = x1
        self.h = h

    def __call__(self, x, y):
        return self.h*((x>self.x0)&(x<self.x1))

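# Illustrative only (not executed): the instance used below behaves like
#   stage = Set_Stage(x0=756000.0, x1=756500.0, h=2.0)
#   stage(array([755000.0, 756250.0]), array([0.0, 0.0]))   # -> [ 0., 2.]
# i.e. the stage is h inside the band x0 < x < x1 and zero outside it.
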
#######################
# Partition the domain
#######################

if myid == 0:

    # Read in the test files

#    filename = 'test-100.tsh'
    filename = 'merimbula_10785_1.tsh'

    # Build the whole domain

#    domain_full = pmesh_to_domain_instance(filename, Domain)

    domain_full = cache(pmesh_to_domain_instance,
                        (filename, Domain),
                        dependencies = [filename])

    rect = array(domain_full.xy_extent, Float)

    # Initialise the wave

#    domain_full.set_quantity('stage', Set_Stage(200.0,300.0,1.0))
    domain_full.set_quantity('stage', Set_Stage(756000.0,756500.0,2.0))

    # Subdivide the domain

    # Note the different arguments compared with pmesh_divide,
    # pmesh_divide_steve etc.

    nodes, triangles, boundary, triangles_per_proc, quantities = \
           pmesh_divide_metis(domain_full, numprocs)

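    # triangles_per_proc records how many triangles the metis partition
    # assigns to each processor; it is used below to build the submeshes
    # and again when each local mesh is assembled.
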
    rect = array(domain_full.xy_extent, Float)

    submesh = build_submesh(nodes, triangles, boundary,\
                            quantities, triangles_per_proc)

    # Send the mesh partition to the appropriate processor

    for p in range(1, numprocs):
        send_submesh(submesh, triangles_per_proc, p)

    # Build the local mesh for processor 0

    hostmesh = extract_hostmesh(submesh)
    points, vertices, boundary, quantities, ghost_recv_dict, full_send_dict = \
             build_local_mesh(hostmesh, 0, triangles_per_proc[0], numprocs)

# Read in the mesh partition that belongs to this
# processor (note that the information is in the
# correct form for the GA data structure)

else:
    points, vertices, boundary, quantities, ghost_recv_dict, full_send_dict \
            = rec_submesh(0)
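
# From here on every processor, including processor 0, holds its local
# points, vertices, boundary and quantities together with ghost_recv_dict
# and full_send_dict, which (as the names suggest) record which triangles
# are received as ghosts from, and sent in full to, each neighbouring
# processor.  Parallel_Domain uses these two dictionaries for its
# communication.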

###########################################
# Start the computations on each subpartition
###########################################

#if myid == 0:
#    print 'ghost'
#    print ghost_recv_dict
#processor_name
#if myid == 0:
#    print 'full'
#    print full_send_dict

# The visualiser needs to know the size of the whole domain

pypar.broadcast(rect,0)

domain = Parallel_Domain(points, vertices, boundary,
                         full_send_dict  = full_send_dict,
                         ghost_recv_dict = ghost_recv_dict)


#try:
#    domain.initialise_visualiser(rect=rect)
#    domain.visualiser.updating['stage'] = True
#    domain.visualiser.setup['elevation'] = True
#    domain.visualiser.qcolor['stage'] = (0.0, 0.0, 0.8)
#    domain.visualiser.scale_z['stage'] = 250
#    domain.visualiser.scale_z['elevation'] = 250
#except:
#    print 'No visualiser'


domain.default_order = 1

# Boundaries
from parallel_shallow_water import Transmissive_boundary, Reflective_boundary

T = Transmissive_boundary(domain)
R = Reflective_boundary(domain)
domain.set_boundary( {'outflow': R, 'inflow': R, 'inner': R, 'exterior': R, 'open': R} )
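
# All boundary tags are currently mapped to the reflective boundary R; the
# transmissive boundary T is constructed above but not used in set_boundary.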


domain.set_quantity('stage', quantities['stage'])
domain.set_quantity('elevation', quantities['elevation'])

#domain.store = True
#domain.filename = 'merimbula-%d' %domain.processor

#---------
# Evolution
t0 = time.time()

print 'Processor %d on %s: No of elements %d' % (domain.processor, processor_name, domain.number_of_elements)
yieldstep = 0.05
finaltime = 500.0

yieldstep = 1
finaltime = 90
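# Note: the assignments just above override the earlier pair, so the run
# below uses yieldstep = 1 and finaltime = 90 (the shorter final time set
# for running on the LC, per the change log at the top of this page).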

#yieldstep = 1
#finaltime = 1
#processor_name
#for t in domain.evolve(yieldstep = yieldstep, finaltime = finaltime):
#    if myid == 0:
#        domain.write_time()
        #print 'Processor %d, Integral of stage %d'%\
        #       (domain.processor,domain.quantities['stage'].get_integral())

# Profiling
#import profile
#profiler = profile.Profile()
#result.dump_stats("profile." + str(numprocs) + "." + str(myid) + ".dat")

# New hotshot profiling
import hotshot
profiler = hotshot.Profile("hotshot." + str(numprocs) + "." + str(myid) + ".prof")
s = '''for t in domain.evolve(yieldstep = yieldstep, finaltime = finaltime):
  if myid == 0:
    domain.write_time()
'''
result = profiler.runctx(s, globals(), locals())
profiler.close()
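
# Sketch only (not part of the original script): the profile written above
# could later be inspected with the standard library's hotshot.stats module,
# for example
#   import hotshot.stats
#   stats = hotshot.stats.load('hotshot.%d.%d.prof' % (numprocs, myid))
#   stats.sort_stats('time').print_stats(20)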

#print 'P%d: That took %.2f seconds' %(myid, time.time()-t0)
#print 'P%d: Communication time %.2f seconds' %(myid, domain.communication_time)
#print 'P%d: Reduction Communication time %.2f seconds' %(myid, domain.communication_reduce_time)
#print 'P%d: Broadcast time %.2f seconds' %(myid, domain.communication_broadcast_time)


if myid == 0:
    print 'That took %.2f seconds' % (time.time()-t0)
    print 'Communication time %.2f seconds' % domain.communication_time
    print 'Reduction Communication time %.2f seconds' % domain.communication_reduce_time
    print 'Broadcast time %.2f seconds' % domain.communication_broadcast_time