source: inundation/parallel/documentation/code/RunParallelMerimbulaMetis.py @ 2906

Last change on this file was r2906, checked in by linda, 18 years ago

Made correction to the parallel report

#!/usr/bin/env python
#########################################################
#
#  Main file for parallel mesh testing. Runs an advection
# flow simulation over the Merimbula mesh, which is
# partitioned with Metis.
#
#  This is a modification of the run_parallel_advection.py
# file.
#
# *) The (new) files that have been added to manage the
# mesh partitioning are
#    +) pmesh_divide_metis.py: subdivide a pmesh
#    +) build_submesh.py: build the submeshes on the host
# processor
#    +) build_local.py: build the GA mesh data structure
# on each processor
#    +) build_commun.py: handle the communication between
# the host and the other processors
#
#  Authors: Linda Stals, Steve Roberts and Matthew Hardy,
# June 2005
#
#########################################################
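
# Overview of the steps below: processor 0 reads the mesh, subdivides
# it with pmesh_divide_metis, builds a submesh (full and ghost
# triangles plus the communication pattern) for every processor and
# sends them out with send_submesh; the other processors receive
# theirs with rec_submesh. Every processor then builds a
# Parallel_Domain from its local data and runs the evolve loop.
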
import sys
import pypar    # The Python-MPI interface
import time


from os import sep
sys.path.append('..'+sep+'pyvolution')

# Numeric arrays

from Numeric import array, zeros, Float

# pmesh

from pmesh2domain import pmesh_to_domain_instance
from advection import Domain as Advection_Domain
from parallel_advection import Parallel_Domain

from generic_boundary_conditions import Transmissive_boundary

# Mesh partition routines

from pmesh_divide  import pmesh_divide_metis
from build_submesh import build_submesh, extract_hostmesh
from build_local   import build_local_mesh
from build_commun  import send_submesh, rec_submesh


############################
# Set the initial conditions
############################

class Set_Stage:
    """Set an initial condition with constant water height,
    for x0 < x < x1
    """

    def __init__(self, x0=0.25, x1=0.5, h=1.0):
        self.x0 = x0
        self.x1 = x1
        self.h = h

    def __call__(self, x, y):
        # Numeric comparisons return arrays of 0s and 1s, so this is
        # h inside the band x0 < x < x1 and 0 elsewhere
        return self.h*((x > self.x0) & (x < self.x1))

rect = zeros(4, Float)  # Buffer for the domain extent (broadcast below)

###############################
# Read in processor information
###############################

numprocs = pypar.size()
myid = pypar.rank()
processor_name = pypar.Get_processor_name()

#######################
# Partition the domain
#######################

if myid == 0:

    # Read in the test file

    filename = 'merimbula_10785.tsh'

    mesh_full = pmesh_to_domain_instance(filename, Advection_Domain)
    mesh_full.set_quantity('stage', Set_Stage(756000.0, 756500.0, 4.0))

    # Define the domain boundaries for visualisation

    rect = array(mesh_full.xy_extent, Float)

    # Subdivide the mesh

    nodes, triangles, boundary, triangles_per_proc, quantities = \
        pmesh_divide_metis(mesh_full, numprocs)

    # Build the mesh that should be assigned to each processor;
    # this includes ghost nodes and the communication pattern

    submesh = build_submesh(nodes, triangles, boundary, quantities,
                            triangles_per_proc)
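
    # submesh now holds, for each processor, its full and ghost
    # triangles, together with the lists that describe the
    # communication pattern between the host and that processor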

    # Send the mesh partition to the appropriate processor

    for p in range(1, numprocs):
        send_submesh(submesh, triangles_per_proc, p)

    # Build the local mesh for processor 0

    points, vertices, boundary, quantities, ghost_recv_dict, full_send_dict = \
        extract_hostmesh(submesh, triangles_per_proc)


else:
    # Read in the mesh partition that belongs to this
    # processor (note that the information is in the
    # correct form for the GA data structure)

    points, vertices, boundary, quantities, ghost_recv_dict, full_send_dict = \
        rec_submesh(0)

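# Both branches leave each processor holding its local mesh data plus
# the dictionaries describing the triangles it must exchange with the
# other processors during the computation
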
###########################################
# Start the computations on each subpartition
###########################################

# The visualiser needs to know the size of the whole domain

pypar.broadcast(rect, 0)

# Build the domain for this processor

domain = Parallel_Domain(points, vertices, boundary,
                         full_send_dict  = full_send_dict,
                         ghost_recv_dict = ghost_recv_dict,
                         velocity = [0.1, 0.0])
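
# full_send_dict lists the full triangles whose updated values this
# processor must send out; ghost_recv_dict says where incoming values
# are stored in the local ghost triangles. The velocity argument sets
# the constant advection velocity for the simulation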

# Visualise the domain

try:
    domain.initialise_visualiser(rect=rect)
except:
    print 'No visualiser'

# Define the boundaries, including the ghost boundary

T = Transmissive_boundary(domain)
domain.set_boundary({'outflow': T, 'inflow': T, 'inner': T,
                     'exterior': T, 'open': T, 'ghost': None})
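
# Segments tagged 'ghost' are updated by parallel communication during
# the evolve loop, so they are given None instead of a boundary object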

# Set the initial quantities

domain.set_quantity('stage', quantities['stage'])

# Record the wall-clock start time and set the yield step and final
# time for the simulation

t0 = time.time()
yieldstep = 1000
finaltime = 50000

# Start the evolve calculations

for t in domain.evolve(yieldstep = yieldstep, finaltime = finaltime):
    if myid == 0:
        domain.write_time()

# Print some timing statistics

if myid == 0:
    print 'That took %.2f seconds' %(time.time()-t0)
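
# A pypar script such as this is typically launched under MPI, for
# example (assuming four processors and that merimbula_10785.tsh is in
# the working directory):
#
#     mpirun -np 4 python RunParallelMerimbulaMetis.py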