source: inundation/parallel/run_parallel_merimbula.py @ 1756

#!/usr/bin/env python
#########################################################
#
#  Main file for parallel mesh testing.
#
#  This is a modification of the run_parallel_advection.py
# file.
#
#  *) The test files currently available are of the form
# test*.out, e.g. test_5l_4c.out. The number in front of the l
# is the number of levels of refinement used to build the grid,
# i.e. a higher number corresponds to a finer grid. The number
# in front of the c is the number of processors.
#
# *) The (new) files that have been added to manage the
# grid partitioning are
#    +) mg2ga.py: read in the test files.
#    +) pmesh_divide.py: subdivide a pmesh.
#    +) build_submesh.py: build the submeshes on the host
# processor.
#    +) build_local.py: build the GA mesh data structure
# on each processor.
#    +) build_commun.py: handle the communication between
# the host and the other processors.
#
# *) Things still to do:
#    +) Overlap the communication and computation: the
# communication routines in build_commun.py should be
# interspersed in the build_submesh.py and build_local.py
# files. This will overlap the communication and
# computation and will be far more efficient. This should
# be done after more testing, when there is more confidence
# in the subpartitioning.
#    +) Much more testing, especially with large numbers of
# processors.
#
#  Authors: Linda Stals, Steve Roberts and Matthew Hardy,
# June 2005
#
#########################################################
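
# Outline of what follows: processor 0 reads the mesh file, sets the
# initial stage, partitions the mesh (pmesh_divide, build_submesh) and
# sends one submesh to each of the other processors (send_submesh).
# Every processor (processor 0 via extract_hostmesh, the others via
# rec_submesh) then builds its local mesh with build_local_mesh and
# wraps it in a Parallel_Domain, after which all processors evolve the
# advection problem together.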
import sys
import pypar    # The Python-MPI interface
import time


from os import sep
sys.path.append('..'+sep+'pyvolution')

from Numeric import array, zeros, Float

# pmesh
#from shallow_water import Domain
from pmesh2domain import pmesh_to_domain_instance
from advection import Domain as Advection_Domain
from parallel_advection import Parallel_Domain

from generic_boundary_conditions import Transmissive_boundary

# mesh partition routines
from pmesh_divide  import pmesh_divide
from build_submesh import build_submesh, extract_hostmesh
from build_local   import build_local_mesh
from build_commun  import send_submesh, rec_submesh

class Set_Stage:
    """Set an initial condition with constant water height h, for x0 < x < x1
    """

    def __init__(self, x0=0.25, x1=0.5, h=1.0):
        self.x0 = x0
        self.x1 = x1
        self.h  = h

    def __call__(self, x, y):
        return self.h*((x>self.x0)&(x<self.x1))

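# For example, Set_Stage(200.0, 300.0, 1.0) is a callable that returns a
# stage of 1.0 at points with 200.0 < x < 300.0 and 0.0 elsewhere; it is
# used below via domain_full.set_quantity('stage', ...).
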
# read in the processor information

numprocs = pypar.size()
myid = pypar.rank()
processor_name = pypar.Get_processor_name()


#-------
# Domain
rect = zeros( 4, Float) # Buffer for the full mesh extent, filled on processor 0 and broadcast below

if myid == 0:

    # read in the test file

    filename = 'test-100.tsh'
#    filename = 'merimbula_10785.tsh'
    nx = numprocs
    ny = 1
    if nx*ny != numprocs:
        print "WARNING: number of subboxes is not equal to the number of processors"

    domain_full = pmesh_to_domain_instance(filename, Advection_Domain)
    domain_full.set_quantity('stage', Set_Stage(200.0,300.0,1.0))
#    domain_full.set_quantity('stage', Set_Stage(756000.0,756500.0,4.0))

    # subdivide the mesh
    nodes, triangles, boundary, triangles_per_proc, quantities = \
            pmesh_divide(domain_full, nx, ny)

    rect = array(domain_full.xy_extent, Float)
    print rect

    submesh = build_submesh(nodes, triangles, boundary, quantities, \
                            triangles_per_proc)

    # send the mesh partition to the appropriate processor

    for p in range(1, numprocs):
        send_submesh(submesh, triangles_per_proc, p)

    # keep the host processor's own partition
    hostmesh = extract_hostmesh(submesh)
    [points, vertices, boundary, quantities, ghost_recv_dict, full_send_dict] = \
             build_local_mesh(hostmesh, 0, triangles_per_proc[0], numprocs)

# read in the mesh partition that belongs to this
# processor (note that the information is in the
# correct form for the GA data structure)

else:
    [points, vertices, boundary, quantities, ghost_recv_dict, full_send_dict] = \
             rec_submesh(0)

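# At this point every processor (including processor 0) holds its own
# piece of the mesh (points, vertices, boundary and quantities) plus the
# ghost_recv_dict / full_send_dict tables that describe which triangles
# are exchanged with neighbouring processors during the parallel update.
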
#if myid == 0:
#    print 'ghost'
#    print ghost_recv_dict
#
#if myid == 0:
#    print 'full'
#    print full_send_dict

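# Broadcast the full mesh extent from processor 0 so that every
# processor can set up its visualiser window over the same rectangle.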
pypar.broadcast(rect,0)
print rect

domain = Parallel_Domain(points, vertices, boundary,
                         full_send_dict  = full_send_dict,
                         ghost_recv_dict = ghost_recv_dict,
                         velocity = [0.1,0.0])

domain.initialise_visualiser(rect=rect)

# Boundaries

T = Transmissive_boundary(domain)
domain.set_boundary( {'outflow': T, 'inflow': T, 'inner': T, 'exterior': T, 'open': T} )
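# Every boundary tag in the mesh file is mapped to the same transmissive
# condition, which takes its values from the interior of the domain and
# so simply lets the advected quantity pass out through the boundary.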


domain.set_quantity('stage', quantities['stage'])

#---------
# Evolution
t0 = time.time()
domain.visualise = True
#yieldstep = 1
yieldstep = 1000
finaltime = 50000
# finaltime = 4000
for t in domain.evolve(yieldstep = yieldstep, finaltime = finaltime):
    if myid == 0:
        domain.write_time()

if myid == 0:
    print 'That took %.2f seconds' %(time.time()-t0)
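
# A pypar script like this is normally launched through MPI, for example
# (the exact command depends on the local MPI installation):
#     mpirun -np 4 python run_parallel_merimbula.py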