source: inundation/parallel/documentation/code/RunParallelMerimbulaMetis.py @ 3315

Last change on this file since 3315 was 3184, checked in by linda, 19 years ago

Changed parallel_advection so the timestep size is updated straight
after the flux calculation. Also added results for advection on the
Merimbula grid

  • Property svn:executable set to *
File size: 4.6 KB
#!/usr/bin/env python
#########################################################
#
#  Main file for parallel mesh testing. Runs an advection
# flow simulation using the Merimbula mesh
#
#  This is a modification of the run_parallel_advection.py
# file
#
#
# *) The (new) files that have been added to manage the
# grid partitioning are
#    +) pmesh_divide.py: subdivide a pmesh (provides the
# Metis-based partitioner pmesh_divide_metis)
#    +) build_submesh.py: build the submeshes on the host
# processor.
#    +) build_local.py: build the GA mesh datastructure
# on each processor.
#    +) build_commun.py: handle the communication between
# the host and processors
#
#  Authors: Linda Stals, Steve Roberts and Matthew Hardy,
# June 2005
#
#
#########################################################
import sys
import pypar    # The Python-MPI interface
import time

from os import sep
sys.path.append('..'+sep+'pyvolution')
sys.path.append('..'+sep+'parallel')

# Numeric arrays

from Numeric import array, zeros, Float

# pmesh

from pyvolution.pmesh2domain import pmesh_to_domain_instance
from pyvolution.advection import Domain as Advection_Domain
from parallel.parallel_advection import Parallel_Domain

from pyvolution.generic_boundary_conditions import Transmissive_boundary

# Mesh partition routines

from parallel.pmesh_divide  import pmesh_divide_metis
from parallel.build_submesh import build_submesh
from parallel.build_local   import build_local_mesh
from parallel.build_commun  import send_submesh, rec_submesh, extract_hostmesh

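#########################################################
#
# Overall flow (the same script runs on every processor):
#  1. Processor 0 reads the full mesh, partitions it with
#     Metis and sends each piece, together with its ghost
#     triangles and communication pattern, to its owner.
#  2. Every processor builds a Parallel_Domain from its
#     local piece and evolves it, refreshing ghost values
#     from neighbouring processors as it goes.
#
#########################################################
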
############################
# Set the initial conditions
############################

class Set_Stage:
    """Set an initial condition with constant water height h,
    for x0 < x < x1
    """

    def __init__(self, x0=0.25, x1=0.5, h=1.0):
        self.x0 = x0
        self.x1 = x1
        self.h  = h

    def __call__(self, x, y):
        return self.h*((x>self.x0)&(x<self.x1))

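# Note: in Set_Stage.__call__ the Numeric expression
# (x>self.x0)&(x<self.x1) is an elementwise 0/1 array, so the
# product is h inside the band x0 < x < x1 and 0 everywhere else.
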
rect = zeros( 4, Float) # Buffer for the full-domain extent
                        # (set on processor 0, then broadcast)

###############################
# Read in processor information
###############################

numprocs = pypar.size()
myid = pypar.rank()
processor_name = pypar.Get_processor_name()

#######################
# Partition the domain
#######################

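# Every processor runs this same script; pypar reports the MPI
# communicator size and this process's rank, and rank 0 below acts
# as the host that reads and partitions the full mesh.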
if myid == 0:

    # Read in the test files

    filename = 'parallel/merimbula_10785.tsh'

    mesh_full = pmesh_to_domain_instance(filename, Advection_Domain)
    mesh_full.set_quantity('stage', Set_Stage(756000.0,756500.0,4.0))

    # Define the domain boundaries for visualisation

    rect = array(mesh_full.xy_extent, Float)

    # Subdivide the mesh

    nodes, triangles, boundary, triangles_per_proc, quantities  =\
            pmesh_divide_metis(mesh_full, numprocs)
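    # pmesh_divide_metis returns the mesh with its triangles
    # reordered so that each processor's share is contiguous;
    # triangles_per_proc[p] is (by this usage) the number of
    # triangles assigned to processor p.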

    # Build the mesh that should be assigned to each processor,
    # this includes ghost nodes and the communication pattern

    submesh = build_submesh(nodes, triangles, boundary, quantities, \
                            triangles_per_proc)
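    # The ghost layer is the usual halo idea: it lets a processor
    # advance its own triangles one step using purely local data,
    # with the ghost copies refreshed from their owners in between
    # steps.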

    # Send the mesh partition to the appropriate processor

    for p in range(1, numprocs):
        send_submesh(submesh, triangles_per_proc, p)
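    # send_submesh presumably flattens submesh p and ships it to
    # processor p with point-to-point pypar messages; partition 0
    # stays on this processor and is unpacked by extract_hostmesh
    # below.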

    # Build the local mesh for processor 0

    points, vertices, boundary, quantities, ghost_recv_dict, full_send_dict = \
             extract_hostmesh(submesh, triangles_per_proc)


else:
    # Read in the mesh partition that belongs to this
    # processor (note that the information is in the
    # correct form for the GA data structure)

    points, vertices, boundary, quantities, ghost_recv_dict, full_send_dict = \
             rec_submesh(0)

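# rec_submesh(0) blocks until the matching send_submesh from
# processor 0 arrives and unpacks it into the same tuple that
# extract_hostmesh produces on the host.
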
##############################################
# Start the computations on each subpartition
##############################################

# The visualiser needs to know the size of the whole domain

pypar.broadcast(rect,0)

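# rect was preallocated with zeros(4, Float) because pypar.broadcast
# writes into the supplied Numeric buffer in place: after this call
# every processor holds the extent computed on processor 0.
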
# Build the domain for this processor

domain = Parallel_Domain(points, vertices, boundary,
                         full_send_dict  = full_send_dict,
                         ghost_recv_dict = ghost_recv_dict,
                         velocity = [0.1,0.0])

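# full_send_dict and ghost_recv_dict describe the halo exchange:
# which of this processor's full triangles the neighbours need, and
# which ghost triangles arrive from which owner. velocity is the
# constant advection velocity, here 0.1 in the x direction.
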
# Visualise the domain

try:
    domain.initialise_visualiser(rect=rect)
except:
    print 'No visualiser'

# Define the boundaries, including the ghost boundary

T = Transmissive_boundary(domain)
domain.set_boundary( {'outflow': T, 'inflow': T, 'inner':T, \
                      'exterior': T, 'open':T, 'ghost':None} )

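# Transmissive_boundary reuses the values of the neighbouring
# interior triangle, so flow passes through these boundaries
# unimpeded. 'ghost' maps to None because ghost triangles receive
# their values through communication, not a boundary condition.
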
# Set the initial quantities

domain.set_quantity('stage', quantities['stage'])

# Record the wall-clock start time and set the yield interval and
# the final simulation time

t0 = time.time()
yieldstep = 1000
finaltime = 50000

# Start the evolve calculations

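# domain.evolve is a generator: it yields roughly every yieldstep
# seconds of model time until finaltime is reached, so the loop body
# below runs once per yield. Only processor 0 prints, to keep the
# output tidy.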
for t in domain.evolve(yieldstep = yieldstep, finaltime = finaltime):
    if myid == 0:
        domain.write_time()

# Print some timing statistics

if myid == 0:
    print 'That took %.2f seconds' %(time.time()-t0)