source: inundation/parallel/documentation/code/RunParallelSwMerimbulaMetis.py @ 2906

Last change on this file since 2906 was 2906, checked in by linda, 18 years ago

Made correction to the parallel report

  • Property svn:executable set to *
File size: 4.9 KB
#!/usr/bin/env python
###
#########################################################
#
#  Main file for parallel mesh testing. Runs a shallow
# water simulation using the Merimbula mesh
#
#
#
# *) The (new) files that have been added to manage the
# grid partitioning are
#    +) pmesh_divide_metis.py: subdivide a pmesh
#    +) build_submesh.py: build the submeshes on the host
# processor.
#    +) build_local.py: build the GA mesh data structure
# on each processor.
#    +) build_commun.py: handle the communication between
# the host and processors
#
#  Authors: Linda Stals, Steve Roberts and Matthew Hardy,
# June 2005
#
#
#
#########################################################
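
# How this script is typically launched (an assumption, not stated in the
# original file): pypar programs are started through MPI, for example
#
#     mpirun -np 4 python RunParallelSwMerimbulaMetis.py
#
# where 4 is the number of processors. The mesh file
# merimbula_10785_1.tsh is read from the working directory.
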
import sys
import pypar    # The Python-MPI interface
import time

from os import sep
sys.path.append('..'+sep+'pyvolution')

# Numeric arrays

from Numeric import array, zeros, Float

# pmesh

from shallow_water import Domain
from parallel_shallow_water import Parallel_Domain
from pmesh2domain import pmesh_to_domain_instance

# Mesh partition routines

from pmesh_divide import pmesh_divide_metis
from build_submesh import build_submesh, extract_hostmesh
from build_local   import build_local_mesh
from build_commun  import send_submesh, rec_submesh

###############################
# Read in processor information
###############################

numprocs = pypar.size()
myid = pypar.rank()
processor_name = pypar.Get_processor_name()

############################
# Set the initial conditions
############################

rect = zeros(4, Float)    # Buffer for the full domain extent (filled on
                          # processor 0 and broadcast to the others below)

class Set_Stage:
    """Set an initial condition with constant water height h,
    for x0 < x < x1
    """

    def __init__(self, x0=0.25, x1=0.5, h=1.0):
        self.x0 = x0
        self.x1 = x1
        self.h = h

    def __call__(self, x, y):
        return self.h*((x>self.x0)&(x<self.x1))
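
# Used below as a callable initial condition: for example,
# Set_Stage(756000.0, 756500.0, 2.0) gives a stage of 2.0 wherever
# 756000 < x < 756500 and 0.0 elsewhere when set_quantity evaluates it
# over the mesh coordinates.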

#######################
# Partition the mesh
#######################

if myid == 0:

    # Read in the test files

    filename = 'merimbula_10785_1.tsh'

    # Build the whole mesh

    mesh_full = pmesh_to_domain_instance(filename, Domain)

    # Define the domain boundaries for visualisation

    rect = array(mesh_full.xy_extent, Float)

    # Initialise the wave

    mesh_full.set_quantity('stage', Set_Stage(756000.0, 756500.0, 2.0))

    # Subdivide the mesh

    nodes, triangles, boundary, triangles_per_proc, quantities = \
         pmesh_divide_metis(mesh_full, numprocs)
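
    # triangles_per_proc[p] is the number of triangles that the metis
    # partitioner assigned to processor p; the triangle list is assumed
    # (from the pmesh_divide interface) to be reordered so that each
    # processor's triangles are contiguous.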

    # Build the mesh that should be assigned to each processor;
    # this includes ghost nodes and the communication pattern

    submesh = build_submesh(nodes, triangles, boundary,\
                            quantities, triangles_per_proc)

    # Send the mesh partition to the appropriate processor

    for p in range(1, numprocs):
        send_submesh(submesh, triangles_per_proc, p)
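
    # send_submesh packs the nodes, triangles, boundary and quantity
    # values for processor p (including its ghost layer) and sends them
    # to that processor via pypar.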

    # Build the local mesh for processor 0

    points, vertices, boundary, quantities, ghost_recv_dict, full_send_dict = \
              extract_hostmesh(submesh, triangles_per_proc)

else:

    # Read in the mesh partition that belongs to this
    # processor (note that the information is in the
    # correct form for the GA data structure)

    points, vertices, boundary, quantities, ghost_recv_dict, full_send_dict \
            = rec_submesh(0)
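
    # This processor receives the same tuple that processor 0 builds
    # locally with extract_hostmesh, so from here on every processor
    # holds its own points, vertices, boundary, quantities and
    # communication dictionaries under the same variable names.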


###########################################
# Start the computations on each subpartition
###########################################

# The visualiser needs to know the size of the whole domain

pypar.broadcast(rect, 0)
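
# (broadcast fills rect in place on processors 1..numprocs-1 with the
# extent computed on processor 0)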

# Build the domain for this processor

domain = Parallel_Domain(points, vertices, boundary,
                         full_send_dict  = full_send_dict,
                         ghost_recv_dict = ghost_recv_dict)
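
# full_send_dict and ghost_recv_dict describe, for each neighbouring
# processor, which local triangles must be sent and which ghost triangles
# are received; Parallel_Domain is assumed to use them to update the
# ghost values during the evolve loop.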

# Visualise the domain

try:
    domain.initialise_visualiser(rect=rect)
    domain.visualiser.scale_z['stage'] = 0.2
    domain.visualiser.scale_z['elevation'] = 0.05
except:
    print 'No visualiser'


domain.default_order = 1

# Define the boundaries, including the ghost boundary

from parallel_shallow_water import Transmissive_boundary, Reflective_boundary

T = Transmissive_boundary(domain)
R = Reflective_boundary(domain)
domain.set_boundary( {'outflow': R, 'inflow': R, 'inner': R, 'exterior': R, \
                      'open': R, 'ghost': None} )
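
# The 'ghost' tag is mapped to None: ghost triangles are presumed to take
# their values from the neighbouring processor during communication, so
# no boundary condition is evaluated for them.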


# Set the initial quantities

domain.set_quantity('stage', quantities['stage'])
domain.set_quantity('elevation', quantities['elevation'])

domain.store = False
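
# (storing sww output is disabled, presumably so that the timing figures
# reported below are not affected by file I/O)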

# Record the wall-clock start time and set the yield step and final time
# for the evolve loop

t0 = time.time()
yieldstep = 1
finaltime = 90
178
179# Start the evolve calculations
180
181for t in domain.evolve(yieldstep = yieldstep, finaltime = finaltime):
182    if myid == 0:
183        domain.write_time()
184
185# Print some timing statistics
186
187if myid == 0:
188    print 'That took %.2f seconds' %(time.time()-t0)
189    print 'Communication time %.2f seconds'%domain.communication_time
190    print 'Reduction Communication time %.2f seconds'\
191          %domain.communication_reduce_time
192    print 'Broadcast time %.2f seconds'\
193          %domain.communication_broadcast_time