Changeset 2769


Timestamp:
Apr 27, 2006, 8:46:30 AM
Author:
linda
Message:

Removed the old pmesh_divide routines; the mesh is now divided using metis. Added comments to run_parallel_advection, run_parallel_merimbula_metis and run_parallel_sw_merimbula_metis. Added run_parallel_merimbula_test and run_parallel_sw_merimbula_test as files that can be used to test new changes.

Location:
inundation/parallel
Files:
3 added
5 edited

Legend:

  (no prefix)  unmodified
  -            removed
  +            added
  …            lines omitted between hunks
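
For orientation before the per-file diffs: the changeset drops the coordinate-based pmesh_divide and pmesh_divide_steve routines in favour of a metis-based partition. The sketch below shows the calling pattern as it appears in the edited scripts. It is a minimal sketch, not part of the changeset: the shallow_water Domain import, the location of pmesh_divide_metis (taken here to be pmesh_divide.py, where the metis code visibly lives), and the mesh filename reused from run_parallel_sw_merimbula_metis.py below are assumptions, and the script directories are assumed to be on sys.path.

    import sys
    from os import sep
    sys.path.append('..' + sep + 'pyvolution')

    import pypar                                  # parallel communication (MPI via pypar)

    from shallow_water import Domain              # assumed import; not shown in this changeset
    from pmesh2domain import pmesh_to_domain_instance
    from pmesh_divide import pmesh_divide_metis   # assumed location of the new partitioner

    numprocs = pypar.size()

    # Build the full sequential domain from a pmesh file (filename as used
    # in run_parallel_sw_merimbula_metis.py below).
    domain_full = pmesh_to_domain_instance('merimbula_10785_1.tsh', Domain)

    # Old, removed in this changeset: coordinate-based division, for example
    #   nodes, triangles, boundary, triangles_per_proc, quantities = \
    #       pmesh_divide(domain_full, n_x = numprocs, n_y = 1)

    # New: let metis (through pymetis) decide the partition.
    nodes, triangles, boundary, triangles_per_proc, quantities = \
        pmesh_divide_metis(domain_full, numprocs)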
  • inundation/parallel/pmesh_divide.py

    r2193 r2769  
 #########################################################
 #
-# Do a simple coordinate based division of the grid
-#
-# *) n_x is the number of subdivisions in the x direction
-# *) n_y is the number of subdivisions in the y direction
+# Divide the mesh using a call to metis, through pymetis.
 #
 # -------------------------------------------------------
…
 #
 #########################################################
-
-def pmesh_divide(domain, n_x = 1, n_y = 1):
-
-    # Find the bounding box
-
-    x_coord_min = domain.xy_extent[0]
-    x_coord_max = domain.xy_extent[2]
-    y_coord_min = domain.xy_extent[1]
-    y_coord_max = domain.xy_extent[3]
-
-    # Find the size of each sub-box
-
-    x_div = (x_coord_max-x_coord_min)/n_x
-    y_div = (y_coord_max-y_coord_min)/n_y
-
-    # Initialise the lists
-
-    tri_list = []
-    triangles_per_proc = []
-    proc_sum = []
-    for i in range(n_x*n_y):
-        tri_list.append([])
-        triangles_per_proc.append([])
-        proc_sum.append([])
-        tri_list[i] = []
-
-    # Subdivide the triangles depending on which sub-box they sit
-    # in (a triangle sits in the sub-box if its first vectex sits
-    # in that sub-box)
-
-    tri_index = {}
-    N = domain.number_of_elements
-    for i in range(N):
-        t = domain.triangles[i]
-
-        x_coord = domain.centroid_coordinates[i][0]
-        bin_x = int(floor((x_coord-x_coord_min)/x_div))
-        if (bin_x == n_x):
-            bin_x = n_x-1
-
-        y_coord = domain.centroid_coordinates[i][1]
-        bin_y = int(floor((y_coord-y_coord_min)/y_div))
-        if (bin_y == n_y):
-            bin_y = n_y-1
-
-        bin = bin_y*n_x + bin_x
-        tri_list[bin].append(t)
-        tri_index[i] = ([bin, len(tri_list[bin])-1])
-
-    # Find the number of triangles per processor and order the
-    # triangle list so that all of the triangles belonging to
-    # processor i are listed before those belonging to processor
-    # i+1
-
-    triangles = []
-    for i in range(n_x*n_y):
-        triangles_per_proc[i] = len(tri_list[i])
-        for t in tri_list[i]:
-            triangles.append(t)
-
-    # The boundary labels have to changed in accoradance with the
-    # new triangle ordering, proc_sum and tri_index help with this
-
-    proc_sum[0] = 0
-    for i in range(n_x*n_y-1):
-        proc_sum[i+1]=proc_sum[i]+triangles_per_proc[i]
-
-    # Relabel the boundary elements to fit in with the new triangle
-    # ordering
-
-    boundary = {}
-    for b in domain.boundary:
-        t =  tri_index[b[0]]
-        boundary[proc_sum[t[0]]+t[1], b[1]] = domain.boundary[b]
-
-    quantities = reorder(domain.quantities, tri_index, proc_sum)
-
-    # Extract the node list
-
-    nodes = domain.coordinates.copy()
-    ttriangles = zeros((len(triangles), 3), Int)
-    for i in range(len(triangles)):
-        ttriangles[i] = triangles[i]
-
-    return nodes, ttriangles, boundary, triangles_per_proc, quantities
-
-
-#########################################################
-#
-# Do a coordinate based division of the grid using the
-# centroid coordinate
-#
-# *) n_x is the number of subdivisions in the x direction
-# *) n_y is the number of subdivisions in the y direction
-#
-# -------------------------------------------------------
-#
-# *)  The nodes, triangles, boundary, and quantities are
-# returned. triangles_per_proc defines the subdivision.
-# The first triangles_per_proc[0] triangles are assigned
-# to processor 0, the next triangles_per_proc[1] are
-# assigned to processor 1 etc. The boundary and quantites
-# are ordered the same way as the triangles
-#
-#########################################################
-def pmesh_divide_steve(domain, n_x = 1, n_y = 1):
-
-    # Find the bounding box
-    x_coord_min = domain.xy_extent[0]
-    x_coord_max = domain.xy_extent[2]
-    y_coord_min = domain.xy_extent[1]
-    y_coord_max = domain.xy_extent[3]
-
-
-    # Find the size of each sub-box
-
-    x_div = (x_coord_max-x_coord_min)/n_x
-    y_div = (y_coord_max-y_coord_min)/n_y
-
-
-    # Initialise the lists
-
-    tri_list = []
-    triangles_per_proc = []
-    proc_sum = []
-    for i in range(n_x*n_y):
-        tri_list.append([])
-        triangles_per_proc.append([])
-        proc_sum.append([])
-        tri_list[i] = []
-
-    # Subdivide the triangles depending on which sub-box they sit
-    # in (a triangle sits in the sub-box if its first vectex sits
-    # in that sub-box)
-
-    tri_index = {}
-    N = domain.number_of_elements
-
-    # Sort by x coordinate of centroid
-
-    sort_order = argsort(argsort(domain.centroid_coordinates[:,0]))
-
-    x_div = float(N)/n_x
-
-    for i in range(N):
-        t = domain.triangles[i]
-
-        bin = int(floor(sort_order[i]/x_div))
-        if (bin == n_x):
-            bin = n_x-1
-
-        tri_list[bin].append(t)
-        tri_index[i] = ([bin, len(tri_list[bin])-1])
-
-    # Find the number of triangles per processor and order the
-    # triangle list so that all of the triangles belonging to
-    # processor i are listed before those belonging to processor
-    # i+1
-
-    triangles = []
-    for i in range(n_x*n_y):
-        triangles_per_proc[i] = len(tri_list[i])
-        for t in tri_list[i]:
-            triangles.append(t)
-
-
-    # The boundary labels have to changed in accoradance with the
-    # new triangle ordering, proc_sum and tri_index help with this
-
-    proc_sum[0] = 0
-    for i in range(n_x*n_y-1):
-        proc_sum[i+1]=proc_sum[i]+triangles_per_proc[i]
-
-    # Relabel the boundary elements to fit in with the new triangle
-    # ordering
-
-    boundary = {}
-    for b in domain.boundary:
-        t =  tri_index[b[0]]
-        boundary[proc_sum[t[0]]+t[1], b[1]] = domain.boundary[b]
-
-    quantities = reorder(domain.quantities, tri_index, proc_sum)
-
-    # Extract the node list
-
-    nodes = domain.coordinates.copy()
-
-    # Convert the triangle datastructure to be an array type,
-    # this helps with the communication
-
-    ttriangles = zeros((len(triangles), 3), Int)
-    for i in range(len(triangles)):
-        ttriangles[i] = triangles[i]
-
-    return nodes, ttriangles, boundary, triangles_per_proc, quantities
-
-
-
-
-###
-#
-# Divide the mesh using a call to metis, through pymetis.
-
-
 path.append('..' + sep + 'pymetis')
 
…
             tri_list[epart[i]].append(domain.triangles[i])
             tri_index[i] = ([epart[i], len(tri_list[epart[i]]) - 1])
-
-        # print tri_list
-        # print triangles_per_proc
 
         # Order the triangle list so that all of the triangles belonging
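
The two kept lines above are the heart of the new metis path: pymetis returns an array epart with one partition index per triangle, and the triangles are then regrouped and reordered the same way the removed routines did. A minimal sketch of that regrouping step follows; the pymetis call itself is outside this excerpt, so epart is simply taken as an input here, and group_by_partition is a hypothetical name used only for illustration.

    def group_by_partition(domain, epart, n_procs):
        # One bucket of triangles per processor, plus a map from the old
        # triangle index to [processor, position within that processor's list].
        tri_list = [[] for p in range(n_procs)]
        tri_index = {}
        for i in range(domain.number_of_elements):
            tri_list[epart[i]].append(domain.triangles[i])
            tri_index[i] = [epart[i], len(tri_list[epart[i]]) - 1]

        # Order the triangle list so that all of the triangles belonging to
        # processor p come before those belonging to processor p+1, mirroring
        # the ordering produced by the removed pmesh_divide routines.
        triangles = []
        triangles_per_proc = []
        for p in range(n_procs):
            triangles_per_proc.append(len(tri_list[p]))
            triangles.extend(tri_list[p])

        return triangles, triangles_per_proc, tri_index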
  • inundation/parallel/run_parallel_advection.py

    r2654 r2769  
 #!/usr/bin/env python
+###
+#########################################################
+#
+#  Main file for parallel mesh testing. Runs an advection
+# flow simulation using a rectangular mesh
+#
+#
+#  Authors: Steve Roberts June 2005
+#  Modified by Linda Stals April 2006
+#
+#
+#########################################################
+
 
 import sys
…
 sys.path.append('..'+sep+'pyvolution')
 
-
-#========================================================================
+# Parallel communication routines
 
 import pypar
 
-from print_stats import print_test_stats, build_full_flag
+#  Mesh partition routines
 
 from parallel_meshes import parallel_rectangle
 
+# Parallel Domain
+
 from parallel_advection import Parallel_Domain
 from parallel_advection import Transmissive_boundary
 
-# Define the inititial conditions
-
+############################
+# Set the initial conditions
+############################
 class Set_Stage:
     """Set an initial condition with constant water height, for x<x0
…
         return self.h*((x>self.x0)&(x<self.x1))
 
-# Get information about the parallel set-up
+###############################
+# Read in processor information
+###############################
 
 numprocs = pypar.size()
…
 M = 20
 
-# Build 1*1 domain containing M*N nodes split over numprocs * 1 processors
+#######################
+# Partition the domain
+#######################
+
+# Build a unit mesh, subdivide it over numproces processors with each
+# submesh containing M*N nodes
 
 points, vertices, boundary, full_send_dict, ghost_recv_dict =  \
     parallel_rectangle(N, M, len1_g=1.0)
+
+###########################################
+# Start the computations on each subpartion
+###########################################
 
 # Create advection domain with direction (1,-1)
…
 domain = Parallel_Domain(points, vertices, boundary,
                          full_send_dict, ghost_recv_dict, velocity=[1.0, 0.0])
-
-# Make a notes of which triangles are full and which are ghost
-
-tri_full_flag = build_full_flag(domain, ghost_recv_dict)
 
 # Turn on the visualisation
…
     t0 = time.time()
 
-# Start the parallel computions
+# Start the evolve computions
 
 for t in domain.evolve(yieldstep = 0.1, finaltime = 3.0):
     if myid == 0:
         domain.write_time()
-    print_test_stats(domain, tri_full_flag)
 
 # Output some computation statistics
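
After these edits, run_parallel_advection.py is a short driver: build a partitioned rectangle, wrap it in a Parallel_Domain, and evolve. A condensed skeleton is sketched below; the value of N and the line myid = pypar.rank() sit outside the excerpt and are assumptions, and the Set_Stage initial condition and visualisation code are omitted.

    import sys
    from os import sep
    sys.path.append('..' + sep + 'pyvolution')

    import pypar
    from parallel_meshes import parallel_rectangle
    from parallel_advection import Parallel_Domain

    numprocs = pypar.size()
    myid = pypar.rank()        # assumed; the rank lookup is outside the excerpt

    N = 20                     # assumed; only M = 20 is visible above
    M = 20

    # Build a unit mesh and subdivide it over numprocs processors, each
    # submesh containing M*N nodes (as the added comments describe).
    points, vertices, boundary, full_send_dict, ghost_recv_dict = \
        parallel_rectangle(N, M, len1_g=1.0)

    # Advection domain with velocity (1, 0), as in the script.
    domain = Parallel_Domain(points, vertices, boundary,
                             full_send_dict, ghost_recv_dict,
                             velocity=[1.0, 0.0])

    for t in domain.evolve(yieldstep = 0.1, finaltime = 3.0):
        if myid == 0:
            domain.write_time()

Scripts like this are started under MPI, typically something along the lines of mpirun -np 4 python run_parallel_advection.py, with the exact launcher depending on how pypar was installed locally.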
  • inundation/parallel/run_parallel_merimbula.py

    r2654 r2769  
 #########################################################
 #
-#  Main file for parallel mesh testing.
+#  Main file for parallel mesh testing. Runs an advection
+# flow simulation using a rectangular mesh
 #
 #  This is a modification of the run_parallel_advection.py
-# file.
+# file
 #
 #
…
 # the host and processors
 #
-# *) Things still to do:
-#    +) Overlap the communication and computation: The
-# communication routines in build_commun.py should be
-# interdispersed in the build_submesh.py and build_local.py
-# files. This will overlap the communication and
-# computation and will be far more efficient. This should
-# be done after more testing and there more confidence in
-# the subpartioning.
-#    +) Much more testing especially with large numbers of
-# processors.
 #  Authors: Linda Stals, Steve Roberts and Matthew Hardy,
 # June 2005
…
 from os import sep
 sys.path.append('..'+sep+'pyvolution')
+
+# Numeric arrays
 
 from Numeric import array, zeros, Float
…
     # subdivide the mesh
     rect = array(domain_full.xy_extent, Float)
-
-    rect = array(rect, Float)
+#    rect = array(rect, Float)
 
     submesh = build_submesh(nodes, triangles, boundary, quantities, \
  • inundation/parallel/run_parallel_sw_merimbula_metis.py

    r2738 r2769  
 #!/usr/bin/env python
 ###
-# Same as run_parallel_sw_merimbula.py, but uses pmesh_divide_metis
-# to partition the mesh.
 #########################################################
 #
-#  Main file for parallel mesh testing.
+#  Main file for parallel mesh testing. Runs a shallow
+# water simulation using the merimbula mesh
 #
-#  This is a modification of the run_parallel_advection.py
-# file.
 #
 #
 # *) The (new) files that have been added to manage the
 # grid partitioning are
-#    +) pmesh_divide.py: subdivide a pmesh
+#    +) pmesh_divide_metis.py: subdivide a pmesh
 #    +) build_submesh.py: build the submeshes on the host
 # processor.
…
 # the host and processors
 #
-# *) Things still to do:
-#    +) Overlap the communication and computation: The
-# communication routines in build_commun.py should be
-# interdispersed in the build_submesh.py and build_local.py
-# files. This will overlap the communication and
-# computation and will be far more efficient. This should
-# be done after more testing and there more confidence in
-# the subpartioning.
-#    +) Much more testing especially with large numbers of
-# processors.
 #  Authors: Linda Stals, Steve Roberts and Matthew Hardy,
 # June 2005
…
 import time
 
-
 from os import sep
 sys.path.append('..'+sep+'pyvolution')
…
 from Numeric import array, zeros, Float
 
-# Print debugging information
-
-from print_stats import print_test_stats, build_full_flag
-
 # pmesh
 
…
 from parallel_shallow_water import Parallel_Domain
 from pmesh2domain import pmesh_to_domain_instance
-
-# Reuse previous mesh import
-
-from caching import cache
 
 # Mesh partition routines
…
     # Read in the test files
 
-#    filename = 'test-100.tsh'
     filename = 'merimbula_10785_1.tsh'
 
     # Build the whole domain
 
-#    domain_full = pmesh_to_domain_instance(filename, Domain)
+    domain_full = pmesh_to_domain_instance(filename, Domain)
 
-    domain_full = cache(pmesh_to_domain_instance,
-               (filename, Domain),
-              dependencies = [filename])
+    # Define the domain boundaries for visualisation
 
     rect = array(domain_full.xy_extent, Float)
…
     # Initialise the wave
 
-#    domain_full.set_quantity('stage', Set_Stage(200.0,300.0,1.0))
     domain_full.set_quantity('stage', Set_Stage(756000.0,756500.0,2.0))
 
-    # Subdivide the domain
-
-    # Note the different arguments compared with pmesh_divide,
-    # pmesh_divide_steve etc.
+    # Subdivide the mesh
 
     nodes, triangles, boundary, triangles_per_proc, quantities = \
          pmesh_divide_metis(domain_full, numprocs)
 
-    rect = array(domain_full.xy_extent, Float)
-
+    # Build the mesh that should be assigned to each processor,
+    # this includes ghost nodes and the communicaiton pattern
+
     submesh = build_submesh(nodes, triangles, boundary,\
                             quantities, triangles_per_proc)
…
              build_local_mesh(hostmesh, 0, triangles_per_proc[0], numprocs)
 
-# Read in the mesh partition that belongs to this
-# processor (note that the information is in the
-# correct form for the GA data structure
+else:
+
+    # Read in the mesh partition that belongs to this
+    # processor (note that the information is in the
+    # correct form for the GA data structure)
 
-else:
     points, vertices, boundary, quantities, ghost_recv_dict, full_send_dict \
             = rec_submesh(0)
…
 ###########################################
 
-#if myid == 0:
-#    print 'ghost'
-#    print ghost_recv_dict
-#processor_name
-#if myid == 0:
-#    print 'full'
-#    print full_send_dict
-
 # The visualiser needs to know the size of the whole domain
 
 pypar.broadcast(rect,0)
+
+# Build the domain for this processor
 
 domain = Parallel_Domain(points, vertices, boundary,
…
                          ghost_recv_dict = ghost_recv_dict)
 
-# Make a note of which triangles are full and which are ghost
+# Visualise the domain
 
-tri_full_flag = build_full_flag(domain, ghost_recv_dict)
-
-#try:
-#    domain.initialise_visualiser(rect=rect)
-#    domain.visualiser.updating['stage'] = True
-#    domain.visualiser.setup['elevation'] = True
-#    domain.visualiser.qcolor['stage'] = (0.0, 0.0, 0.8)
-#    domain.visualiser.scale_z['stage'] = 250
-#    domain.visualiser.scale_z['elevation'] = 250
-#except:
-#    print 'No visualiser'
+try:
+    domain.initialise_visualiser(rect=rect)
+    domain.visualiser.scale_z['stage'] = 0.2
+    domain.visualiser.scale_z['elevation'] = 0.05
+except:
+    print 'No visualiser'
 
 
 domain.default_order = 1
 
-#Boundaries
+# Define the boundaries, including the ghost boundary
+
 from parallel_shallow_water import Transmissive_boundary, Reflective_boundary
 
 T = Transmissive_boundary(domain)
 R = Reflective_boundary(domain)
-domain.set_boundary( {'outflow': R, 'inflow': R, 'inner':R, 'exterior': R, 'open':R} )
+domain.set_boundary( {'outflow': R, 'inflow': R, 'inner':R, 'exterior': R, \
+                      'open':R, 'ghost':None} )
 
+
+# Set the initial quantities
 
 domain.set_quantity('stage', quantities['stage'])
…
 
 domain.store = False
-#domain.filename = 'merimbula-%d' %domain.processor
 
-#---------
-# Evolution
+# Set the number of time steps, as well as the start and end time
+
 t0 = time.time()
-
-print 'Processor %d on %s: No of elements %d'%(domain.processor,processor_name,domain.number_of_elements)
-yieldstep = 0.05
-finaltime = 500.0
-
 yieldstep = 1
 finaltime = 90
 
-#yieldstep = 1
-#finaltime = 1
-#processor_name
-#for t in domain.evolve(yieldstep = yieldstep, finaltime = finaltime):
-#    if myid == 0:
-#        domain.write_time()
-        #print 'Processor %d, Integral of stage %d'%\
-        #       (domain.processor,domain.quantities['stage'].get_integral())
-        #    print_test_stats(domain, tri_full_flag)
 
+# Start the evolve calculations
 
-# Profiling
-#import profile
-#profiler = profile.Profile()
-#result.dump_stats("profile." + str(numprocs) + "." + str(myid) + ".dat")
+for t in domain.evolve(yieldstep = yieldstep, finaltime = finaltime):
+    if myid == 0:
+        domain.write_time()
 
-#New hotshot profiling
-import hotshot
-profiler = hotshot.Profile("hotshot." + str(numprocs) + "." + str(myid) + ".prof")
-s = '''for t in domain.evolve(yieldstep = yieldstep, finaltime = finaltime):
-  if myid == 0:
-    domain.write_time()
-  print_test_stats(domain, tri_full_flag)
-
-'''
-result = profiler.runctx(s, globals(), locals())
-profiler.close()
-
-#print 'P%d: That took %.2f seconds' %(myid, time.time()-t0)
-#print 'P%d: Communication time %.2f seconds' %(myid, domain.communication_time)
-#print 'P%d: Reduction Communication time %.2f seconds' %(myid, domain.communication_reduce_time)
-#print 'P%d: Broadcast time %.2f seconds' %(myid, domain.communication_broadcast_time)
-
-
+# Print some timing statistics
 
 if myid == 0:
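
The shape of the script after these edits is a host/worker split: processor 0 partitions the mesh and builds one submesh per processor, every other processor receives its piece with rec_submesh(0), and all processors then build a Parallel_Domain, set a 'ghost': None boundary for the ghost triangles, and evolve. The sketch below condenses that flow. It is only a sketch: the import locations for build_local_mesh and rec_submesh are guessed from the module names mentioned in the comments (build_local.py, build_commun.py), the return tuple of build_local_mesh is assumed to match the worker branch visible in the diff, and the earlier steps that produce hostmesh, triangles_per_proc and quantities on processor 0 are abbreviated to a comment.

    import pypar
    from parallel_shallow_water import Parallel_Domain, Reflective_boundary
    from build_local import build_local_mesh     # assumed import locations
    from build_commun import rec_submesh

    numprocs = pypar.size()
    myid = pypar.rank()                          # assumed; defined earlier in the script

    if myid == 0:
        # Host: read the mesh, call pmesh_divide_metis, build and send the
        # per-processor submeshes (those steps appear in the diff above and
        # are omitted here), then keep the local piece.
        points, vertices, boundary, quantities, ghost_recv_dict, full_send_dict = \
            build_local_mesh(hostmesh, 0, triangles_per_proc[0], numprocs)
    else:
        # Worker: receive the submesh that processor 0 built for this processor.
        points, vertices, boundary, quantities, ghost_recv_dict, full_send_dict = \
            rec_submesh(0)

    # Every processor builds its own domain from its submesh.
    domain = Parallel_Domain(points, vertices, boundary,
                             full_send_dict = full_send_dict,
                             ghost_recv_dict = ghost_recv_dict)

    # Ghost triangles carry no boundary condition of their own; their values
    # arrive from neighbouring processors during the evolve.
    R = Reflective_boundary(domain)
    domain.set_boundary({'outflow': R, 'inflow': R, 'inner': R,
                         'exterior': R, 'open': R, 'ghost': None})

    domain.set_quantity('stage', quantities['stage'])

    for t in domain.evolve(yieldstep = 1, finaltime = 90):
        if myid == 0:
            domain.write_time()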
  • inundation/parallel/run_parallel_sw_rectangle.py

    r2761 r2769  
 
 
+<<<<<<< .mine
+=======
 #from pmesh_divide import pmesh_divide, pmesh_divide_steve
 
+>>>>>>> .r2768
 # read in the processor information
 