Changeset 2152 for inundation/parallel


Timestamp:
Dec 16, 2005, 9:59:58 AM
Author:
linda
Message:

Added caching to the sw_merimbula files. Also updated some of the comments.
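
In brief, the caching change wraps the expensive mesh import (pmesh_to_domain_instance on a .tsh file) in the toolbox's cache routine, so a re-run with an unchanged mesh file reloads the stored domain instead of rebuilding it. A minimal sketch of the pattern, assuming the script is run from inundation/parallel so that ../pyvolution and the caching module are importable (the mesh file name is the test mesh already used by these scripts):

    import sys
    from os import sep
    sys.path.append('..'+sep+'pyvolution')

    from shallow_water import Domain
    from pmesh2domain import pmesh_to_domain_instance
    from caching import cache

    filename = 'merimbula_10785_1.tsh'   # test mesh used by the edited scripts

    # First run builds the domain from the .tsh file and stores the result;
    # later runs reload the cached domain, and listing 'filename' as a
    # dependency invalidates the cache whenever the mesh file changes.
    domain_full = cache(pmesh_to_domain_instance,
                        (filename, Domain),
                        dependencies = [filename])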

Location:
inundation/parallel
Files:
5 edited

  • inundation/parallel/run_advection.py

    r1639 → r2152
     from mesh_factory import rectangular
     
    -points, vertices, boundary = rectangular(60, 60)
    +#points, vertices, boundary = rectangular(60, 60)
    +points, vertices, boundary = rectangular(10, 10)
     
     #Create advection domain with direction (1,-1)
  • inundation/parallel/run_parallel_merimbula.py

    r2130 → r2152
     # file.
     #
    -#  *) The test files currently avaliable are of the form
    -# test*.out, eg test_5l_4c.out. The term infront of the l
    -# corresponds to the number of levels of refinement
    -# required to build the grid, i.e. a higher number
    -# corresponds to a finer grid. The term infront of the c
    -# corresponds to the number of processors.
     #
     # *) The (new) files that have been added to manage the
  • inundation/parallel/run_parallel_sw_merimbula.py

    r2131 → r2152
     # file.
     #
    -#  *) The test files currently avaliable are of the form
    -# test*.out, eg test_5l_4c.out. The term infront of the l
    -# corresponds to the number of levels of refinement
    -# required to build the grid, i.e. a higher number
    -# corresponds to a finer grid. The term infront of the c
    -# corresponds to the number of processors.
     #
     # *) The (new) files that have been added to manage the
    …
     #
     #########################################################
    +
     import sys
     import pypar    # The Python-MPI interface
     import time
     
    -
     from os import sep
     sys.path.append('..'+sep+'pyvolution')
     
    -from Numeric import array
    +# Numeric arrays
    +
    +from Numeric import array, zeros, Float
    +
     # pmesh
    -
    -#from shallow_water import Domain
     
     from shallow_water import Domain
     from parallel_shallow_water import Parallel_Domain
    -
    -# mesh partition routines
    +from pmesh2domain import pmesh_to_domain_instance
    +
    +# Reuse previous mesh import
    +
    +from caching import cache
    +
    +# Mesh partition routines
     
     from pmesh_divide import pmesh_divide, pmesh_divide_steve
    -from build_submesh import *
    -from build_local import *
    -from build_commun import *
    -from pmesh2domain import pmesh_to_domain_instance
    -
    -# read in the processor information
    +from build_submesh import build_submesh, extract_hostmesh
    +from build_local   import build_local_mesh
    +from build_commun  import send_submesh, rec_submesh
    +
    +
    +###############################
    +# Read in processor information
    +###############################
     
     numprocs = pypar.size()
    …
     processor_name = pypar.Get_processor_name()
     
    -#-------
    -# Domain
    +############################
    +# Set the initial conditions
    +############################
    +
     rect = zeros( 4, Float) # Buffer for results
     
    …
             return self.h*((x>self.x0)&(x<self.x1))
     
    +#######################
    +# Partition the domain
    +#######################
     
     if myid == 0:
     
    -    # read in the test files
    +    # Read in the test files
     
     #    filename = 'test-100.tsh'
    …
             print "WARNING: number of subboxes is not equal to the number of proc"
     
    -    domain_full = pmesh_to_domain_instance(filename, Domain)
    -
    +    # Build the whole domain
    +
    +#    domain_full = pmesh_to_domain_instance(filename, Domain)
    +
    +    domain_full = cache(pmesh_to_domain_instance,
    +               (filename, Domain),
    +              dependencies = [filename])
    +
    +    rect = array(domain_full.xy_extent, Float)
    +
    +    # Initialise the wave
    +
     #    domain_full.set_quantity('stage', Set_Stage(200.0,300.0,1.0))
         domain_full.set_quantity('stage', Set_Stage(756000.0,756500.0,2.0))
     
    +    # Subdivide the domain
    +
         nodes, triangles, boundary, triangles_per_proc, quantities = \
              pmesh_divide_steve(domain_full, nx, ny)
     
    -    rect = array(domain_full.xy_extent, Float)
    -
         submesh = build_submesh(nodes, triangles, boundary,\
                                 quantities, triangles_per_proc)
     
    -    # send the mesh partition to the appropriate processor
    +    # Send the mesh partition to the appropriate processor
     
         for p in range(1, numprocs):
           send_submesh(submesh, triangles_per_proc, p)
     
    +    # Build the local mesh for processor 0
    +
         hostmesh = extract_hostmesh(submesh)
    -
    -    points, vertices, boundary, quantities, ghost_recv_dict, full_send_dict = \
    -             build_local_mesh(hostmesh, 0, triangles_per_proc[0], numprocs)
    -
    -# read in the mesh partition that belongs to this
    +    points, vertices, boundary, quantities, ghost_recv_dict, \
    +            full_send_dict = build_local_mesh(hostmesh, 0, \
    +                                              triangles_per_proc[0], \
    +                                              numprocs)
    +
    +# Read in the mesh partition that belongs to this
     # processor (note that the information is in the
     # correct form for the GA data structure
     
     else:
    -    points, vertices, boundary, quantities, ghost_recv_dict, full_send_dict \
    -            = rec_submesh(0)
    -
    +    points, vertices, boundary, quantities, ghost_recv_dict, \
    +            full_send_dict = rec_submesh(0)
    +
    +
    +###########################################
    +# Start the computations on each subpartion
    +###########################################
     
     ##    ########### start profile testing
    …
     ######## end profile testing
     
    -
    +
    +# The visualiser needs to know the size of the whole domain
    +
     pypar.broadcast(rect,0)
    -#print rect
    +
     
     domain = Parallel_Domain(points, vertices, boundary,
                              full_send_dict  = full_send_dict,
                              ghost_recv_dict = ghost_recv_dict)
    -
     
     try:
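
For readers following the hunks above, the resulting control flow of run_parallel_sw_merimbula.py is, in condensed form (a sketch only; initial conditions, profiling and visualiser setup are omitted): processor 0 builds the cached full domain, partitions it, ships one submesh to each remote processor, and every processor finishes with its own local GA mesh.

    if myid == 0:
        # Processor 0: build (or reload from cache) the full domain
        domain_full = cache(pmesh_to_domain_instance,
                            (filename, Domain),
                            dependencies = [filename])

        # Partition into one piece per processor
        nodes, triangles, boundary, triangles_per_proc, quantities = \
             pmesh_divide_steve(domain_full, nx, ny)
        submesh = build_submesh(nodes, triangles, boundary,
                                quantities, triangles_per_proc)

        # Send each remote processor its piece
        for p in range(1, numprocs):
            send_submesh(submesh, triangles_per_proc, p)

        # Keep processor 0's own piece and build its local mesh
        hostmesh = extract_hostmesh(submesh)
        points, vertices, boundary, quantities, ghost_recv_dict, \
                full_send_dict = build_local_mesh(hostmesh, 0,
                                                  triangles_per_proc[0],
                                                  numprocs)
    else:
        # Remote processors: receive the partition, already in GA form
        points, vertices, boundary, quantities, ghost_recv_dict, \
                full_send_dict = rec_submesh(0)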
  • inundation/parallel/run_parallel_sw_merimbula_metis.py

    r2130 → r2152
     # file.
     #
    -#  *) The test files currently avaliable are of the form
    -# test*.out, eg test_5l_4c.out. The term infront of the l
    -# corresponds to the number of levels of refinement
    -# required to build the grid, i.e. a higher number
    -# corresponds to a finer grid. The term infront of the c
    -# corresponds to the number of processors.
     #
     # *) The (new) files that have been added to manage the
    …
     sys.path.append('..'+sep+'pyvolution')
     
    -from Numeric import array
    +# Numeric arrays
    +
    +from Numeric import array, zeros, Float
    +
     # pmesh
    -
    -#from shallow_water import Domain
     
     from shallow_water import Domain
     from parallel_shallow_water import Parallel_Domain
    -
    -# mesh partition routines
    +from pmesh2domain import pmesh_to_domain_instance
    +
    +# Reuse previous mesh import
    +
    +from caching import cache
    +
    +# Mesh partition routines
     
     from pmesh_divide import pmesh_divide_metis
    -from build_submesh import *
    -from build_local import *
    -from build_commun import *
    -from pmesh2domain import pmesh_to_domain_instance
    -
    -# read in the processor information
    +from build_submesh import build_submesh, extract_hostmesh
    +from build_local   import build_local_mesh
    +from build_commun  import send_submesh, rec_submesh
    +
    +###############################
    +# Read in processor information
    +###############################
     
     numprocs = pypar.size()
    …
     processor_name = pypar.Get_processor_name()
     
    -#-------
    -# Domain
    +############################
    +# Set the initial conditions
    +############################
    +
     rect = zeros( 4, Float) # Buffer for results
     
    …
             return self.h*((x>self.x0)&(x<self.x1))
     
    +#######################
    +# Partition the domain
    +#######################
     
     if myid == 0:
     
    -    # read in the test files
    +    # Read in the test files
     
     #    filename = 'test-100.tsh'
         filename = 'merimbula_10785_1.tsh'
     
    -    domain_full = pmesh_to_domain_instance(filename, Domain)
    +    # Build the whole domain
    +
    +#    domain_full = pmesh_to_domain_instance(filename, Domain)
    +
    +    domain_full = cache(pmesh_to_domain_instance,
    +               (filename, Domain),
    +              dependencies = [filename])
    +
    +    rect = array(domain_full.xy_extent, Float)
    +
    +    # Initialise the wave
     
     #    domain_full.set_quantity('stage', Set_Stage(200.0,300.0,1.0))
         domain_full.set_quantity('stage', Set_Stage(756000.0,756500.0,2.0))
    +
    +    # Subdivide the domain
     
         # Note the different arguments compared with pmesh_divide,
    …
                                 quantities, triangles_per_proc)
     
    -    # send the mesh partition to the appropriate processor
    +    # Send the mesh partition to the appropriate processor
     
         for p in range(1, numprocs):
           send_submesh(submesh, triangles_per_proc, p)
    +
    +    # Build the local mesh for processor 0
     
         hostmesh = extract_hostmesh(submesh)
    …
              build_local_mesh(hostmesh, 0, triangles_per_proc[0], numprocs)
     
    -# read in the mesh partition that belongs to this
    +# Read in the mesh partition that belongs to this
     # processor (note that the information is in the
     # correct form for the GA data structure
    …
         points, vertices, boundary, quantities, ghost_recv_dict, full_send_dict \
                 = rec_submesh(0)
    +
    +
    +###########################################
    +# Start the computations on each subpartion
    +###########################################
     
     #if myid == 0:
    …
     #    print full_send_dict
     
    +# The visualiser needs to know the size of the whole domain
     
     pypar.broadcast(rect,0)
    -#print rect
     
     domain = Parallel_Domain(points, vertices, boundary,
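
One small but easy-to-miss piece of both merimbula scripts is the extent broadcast that the new comment documents: every processor allocates a 4-element buffer, processor 0 fills it with the full domain's xy extent, and pypar.broadcast shares it so the visualiser on each processor knows the size of the whole domain. Sketched out of context (myid and domain_full come from the surrounding script):

    from Numeric import array, zeros, Float

    rect = zeros(4, Float)        # extent buffer, allocated on every processor

    if myid == 0:
        rect = array(domain_full.xy_extent, Float)

    pypar.broadcast(rect, 0)      # processor 0's extent reaches all processors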
  • inundation/parallel/run_parallel_sw_rectangle.py

    r2130 → r2152
    -#!/usr/bin/env/python
    +#!/usr/bin/env python
     #########################################################
     #
    …
     # file.
     #
    -#  *) The test files currently avaliable are of the form
    -# test*.out, eg test_5l_4c.out. The term infront of the l
    -# corresponds to the number of levels of refinement
    -# required to build the grid, i.e. a higher number
    -# corresponds to a finer grid. The term infront of the c
    -# corresponds to the number of processors.
     #
    -# *) The (new) files that have been added to manage the
    -# grid partitioning are
    -#    +) pmesh_divide.py: subdivide a pmesh
    -#    +) build_submesh.py: build the submeshes on the host
    -# processor.
    -#    +) build_local.py: build the GA mesh datastructure
    -# on each processor.
    -#    +) build_commun.py: handle the communication between
    -# the host and processors
    -#
    -# *) Things still to do:
    -#    +) Overlap the communication and computation: The
    -# communication routines in build_commun.py should be
    -# interdispersed in the build_submesh.py and build_local.py
    -# files. This will overlap the communication and
    -# computation and will be far more efficient. This should
    -# be done after more testing and there more confidence in
    -# the subpartioning.
    -#    +) Much more testing especially with large numbers of
    -# processors.
     #  Authors: Linda Stals, Steve Roberts and Matthew Hardy,
     # June 2005
    …
     
     from pmesh_divide import pmesh_divide, pmesh_divide_steve
    -from build_submesh import *
    -from build_local import *
    -from build_commun import *
    -from pmesh2domain import pmesh_to_domain_instance
     
     # read in the processor information
    …
     myid = pypar.rank()
     processor_name = pypar.Get_processor_name()
    -
    -
    -
    -
     
     M = 20