Timestamp:
Dec 16, 2005, 9:59:58 AM
Author:
linda
Message:

Added caching to the sw_merimbula files. Also updated some of the comments.

File:
1 edited

Legend:

Unmodified (leading space)
Added (+)
Removed (-)
  • inundation/parallel/run_parallel_sw_merimbula.py

--- inundation/parallel/run_parallel_sw_merimbula.py (r2131)
+++ inundation/parallel/run_parallel_sw_merimbula.py (r2152)

 # file.
 #
-#  *) The test files currently avaliable are of the form
-# test*.out, eg test_5l_4c.out. The term infront of the l
-# corresponds to the number of levels of refinement
-# required to build the grid, i.e. a higher number
-# corresponds to a finer grid. The term infront of the c
-# corresponds to the number of processors.
 #
 # *) The (new) files that have been added to manage the
…
 #
 #########################################################
+
 import sys
 import pypar    # The Python-MPI interface
 import time
 
-
 from os import sep
 sys.path.append('..'+sep+'pyvolution')
 
-from Numeric import array
+# Numeric arrays
+
+from Numeric import array, zeros, Float
+
 # pmesh
-
-#from shallow_water import Domain
 
 from shallow_water import Domain
 from parallel_shallow_water import Parallel_Domain
-
-# mesh partition routines
+from pmesh2domain import pmesh_to_domain_instance
+
+# Reuse previous mesh import
+
+from caching import cache
+
+# Mesh partition routines
 
 from pmesh_divide import pmesh_divide, pmesh_divide_steve
-from build_submesh import *
-from build_local import *
-from build_commun import *
-from pmesh2domain import pmesh_to_domain_instance
-
-# read in the processor information
+from build_submesh import build_submesh, extract_hostmesh
+from build_local   import build_local_mesh
+from build_commun  import send_submesh, rec_submesh
+
+
+###############################
+# Read in processor information
+###############################
 
 numprocs = pypar.size()
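For orientation, the process identifiers used throughout the script come from pypar. A minimal sketch of that setup; pypar.size() and pypar.Get_processor_name() appear in the diff, while pypar.rank() and pypar.finalize() are assumed from the usual pypar interface:

    import pypar                                   # The Python-MPI interface

    numprocs = pypar.size()                        # number of MPI processes
    myid = pypar.rank()                            # rank of this process (0 = master)
    processor_name = pypar.Get_processor_name()    # host running this process

    print 'Processor %d of %d on node %s' % (myid, numprocs, processor_name)

    pypar.finalize()                               # shut MPI down cleanly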
     
…
 processor_name = pypar.Get_processor_name()
 
-#-------
-# Domain
+############################
+# Set the initial conditions
+############################
+
 rect = zeros( 4, Float) # Buffer for results
 
     
…
         return self.h*((x>self.x0)&(x<self.x1))
 
+#######################
+# Partition the domain
+#######################
 
 if myid == 0:
 
-    # read in the test files
+    # Read in the test files
 
 #    filename = 'test-100.tsh'
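The return statement above belongs to the Set_Stage class defined earlier in the script (outside this changeset). A minimal sketch of that class; only the __call__ body is taken from the diff, the constructor is assumed:

    class Set_Stage:
        """Set an initial water stage of height h between x = x0 and x = x1."""

        def __init__(self, x0, x1, h):
            self.x0 = x0
            self.x1 = x1
            self.h  = h

        def __call__(self, x, y):
            # h where x0 < x < x1, zero elsewhere (elementwise on Numeric arrays)
            return self.h*((x>self.x0)&(x<self.x1))

It is the callable passed to domain_full.set_quantity('stage', ...) in the next hunk.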
     
…
         print "WARNING: number of subboxes is not equal to the number of proc"
 
-    domain_full = pmesh_to_domain_instance(filename, Domain)
-
+    # Build the whole domain
+
+#    domain_full = pmesh_to_domain_instance(filename, Domain)
+
+    domain_full = cache(pmesh_to_domain_instance,
+               (filename, Domain),
+              dependencies = [filename])
+
+    rect = array(domain_full.xy_extent, Float)
+
+    # Initialise the wave
+
 #    domain_full.set_quantity('stage', Set_Stage(200.0,300.0,1.0))
     domain_full.set_quantity('stage', Set_Stage(756000.0,756500.0,2.0))
 
+    # Subdivide the domain
+
     nodes, triangles, boundary, triangles_per_proc, quantities = \
          pmesh_divide_steve(domain_full, nx, ny)
 
-    rect = array(domain_full.xy_extent, Float)
-
     submesh = build_submesh(nodes, triangles, boundary,\
                             quantities, triangles_per_proc)
 
-    # send the mesh partition to the appropriate processor
+    # Send the mesh partition to the appropriate processor
 
     for p in range(1, numprocs):
       send_submesh(submesh, triangles_per_proc, p)
 
+    # Build the local mesh for processor 0
+
     hostmesh = extract_hostmesh(submesh)
-
-    points, vertices, boundary, quantities, ghost_recv_dict, full_send_dict = \
-             build_local_mesh(hostmesh, 0, triangles_per_proc[0], numprocs)
-
-# read in the mesh partition that belongs to this
+    points, vertices, boundary, quantities, ghost_recv_dict, \
+            full_send_dict = build_local_mesh(hostmesh, 0, \
+                                              triangles_per_proc[0], \
+                                              numprocs)
+
+# Read in the mesh partition that belongs to this
 # processor (note that the information is in the
 # correct form for the GA data structure
 
 else:
-    points, vertices, boundary, quantities, ghost_recv_dict, full_send_dict \
-            = rec_submesh(0)
-
+    points, vertices, boundary, quantities, ghost_recv_dict, \
+            full_send_dict = rec_submesh(0)
+
+
+###########################################
+# Start the computations on each subpartion
+###########################################
 
 ##    ########### start profile testing
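The substantive change in this hunk is wrapping the mesh import in cache(). A minimal sketch of the pattern in isolation, with a hypothetical mesh file name, assuming ANUGA's caching.cache evaluates the wrapped call once, stores the result, and reuses it on later runs while the files listed in dependencies are unchanged:

    from caching import cache
    from pmesh2domain import pmesh_to_domain_instance
    from shallow_water import Domain

    filename = 'merimbula_example.tsh'    # hypothetical mesh file

    # Same result as pmesh_to_domain_instance(filename, Domain), but the
    # domain is rebuilt only when the mesh file changes; otherwise the
    # previously stored result is loaded from the cache.
    domain_full = cache(pmesh_to_domain_instance,
                        (filename, Domain),
                        dependencies = [filename])

This is the point of the change: repeated runs of the script can skip the expensive mesh import on processor 0.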
     
…
 ######## end profile testing
 
-
+
+# The visualiser needs to know the size of the whole domain
+
 pypar.broadcast(rect,0)
-#print rect
+
 
 domain = Parallel_Domain(points, vertices, boundary,
                          full_send_dict  = full_send_dict,
                          ghost_recv_dict = ghost_recv_dict)
-
 
 try:
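The pypar.broadcast(rect, 0) call above shares the extent computed on processor 0 with every other processor, each of which pre-allocates the same buffer. A minimal sketch of that step in isolation; the extent values are hypothetical and pypar.rank() is assumed:

    import pypar
    from Numeric import array, zeros, Float

    rect = zeros(4, Float)          # buffer for the xy extent of the full domain

    if pypar.rank() == 0:
        rect = array([0.0, 0.0, 1000.0, 1000.0], Float)   # hypothetical extent values

    pypar.broadcast(rect, 0)        # every processor now holds processor 0's extent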