Changeset 2130


Timestamp: Dec 8, 2005, 8:29:24 AM (18 years ago)
Author: linda
Message: Modified the parallel code to agree with the python style files
Location: inundation/parallel
Files: 1 deleted, 8 edited

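Most edits in this changeset apply the same few conventions from the project's Python style files: wildcard imports are replaced by explicit import lists, in-code comments begin with a capital letter, and long statements are wrapped onto a continuation line. A condensed before/after sketch of the pattern, taken from the build_local.py hunks shown below:

    # Before (r2091)
    from mesh import *
    from Numeric import *

    # combine the full nodes and ghost nodes
    nodes = concatenate((submesh["full_nodes"], submesh["ghost_nodes"]))

    # After (r2130)
    from Numeric import zeros, Float, Int, concatenate, \
         take, arrayrange, put, sort, compress, equal

    # Combine the full nodes and ghost nodes
    nodes = concatenate((submesh["full_nodes"], \
                         submesh["ghost_nodes"]))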
  • inundation/parallel/build_commun.py

    r2094 → r2130

    @@ -17,5 +17,5 @@
     #########################################################
     
    -from Numeric import array, Int, Float
    +from Numeric import array, Int, Float, zeros
     import logging, logging.config
     logger = logging.getLogger('parallel')
    @@ -30,6 +30,6 @@
     import sys
     import pypar
    -from Numeric import zeros
    -from build_local import *
    +
    +from build_local import build_local_mesh
     
     #########################################################
    @@ -140,6 +140,4 @@
     def rec_submesh_flat(p):
     
    -    from Numeric import zeros, Float, Int
    -
         numproc = pypar.size()
         myid = pypar.rank()
    @@ -273,6 +271,4 @@
     def rec_submesh(p):
     
    -    from Numeric import zeros, Float, Int
    -
         numproc = pypar.size()
         myid = pypar.rank()
  • inundation/parallel/build_local.py

    r2091 → r2130

    @@ -18,6 +18,7 @@
     #########################################################
     
    -from mesh import *
    -from Numeric import *
    +from Numeric import  zeros, Float, Int, concatenate, \
    +     take, arrayrange, put, sort, compress, equal
    +
     
     #########################################################
    @@ -44,9 +45,9 @@
         Ntriangles = len(triangles)
     
    -    # extract the nodes (using the local ID)
    +    # Extract the nodes (using the local ID)
     
         GAnodes = take(nodes, (1, 2), 1)
     
    -    # build a global ID to local ID mapping
    +    # Build a global ID to local ID mapping
     
         NGlobal = 0
    @@ -55,7 +56,8 @@
                 NGlobal = nodes[i][0]
         index = zeros(int(NGlobal)+1, Int)
    -    put(index, take(nodes, (0,), 1).astype(Int), arrayrange(Nnodes))
    +    put(index, take(nodes, (0,), 1).astype(Int), \
    +        arrayrange(Nnodes))
     
    -    # change the global IDs in the triangles to the local IDs
    +    # Change the global IDs in the triangles to the local IDs
     
         GAtriangles = zeros((Ntriangles, 3), Int)
    @@ -94,10 +96,10 @@
     def build_local_commun(index, ghostc, fullc, nproc):
     
    -    # initialise
    +    # Initialise
     
         full_send = {}
         ghost_recv = {}
     
    -    # build the ghost_recv dictionary (sort the
    +    # Build the ghost_recv dictionary (sort the
         # information by the global numbering)
     
    @@ -112,5 +114,5 @@
                 ghost_recv[c][0] = take(index, d)
     
    -    # build a temporary copy of the full_send dictionary
    +    # Build a temporary copy of the full_send dictionary
         # (this version allows the information to be stored
         # by the global numbering)
    @@ -122,7 +124,8 @@
                 if not tmp_send.has_key(neigh):
                     tmp_send[neigh] = []
    -            tmp_send[neigh].append([global_id, index[global_id]])
    -
    -    # extract the full send information and put it in the form
    +            tmp_send[neigh].append([global_id, \
    +                                    index[global_id]])
    +
    +    # Extract the full send information and put it in the form
         # required for the full_send dictionary
     
    @@ -158,21 +161,22 @@
     def build_local_mesh(submesh, lower_t, upper_t, nproc):
     
    -    # combine the full nodes and ghost nodes
    -
    -    nodes = concatenate((submesh["full_nodes"], submesh["ghost_nodes"]))
    -
    -    # combine the full triangles and ghost triangles
    +    # Combine the full nodes and ghost nodes
    +
    +    nodes = concatenate((submesh["full_nodes"], \
    +                         submesh["ghost_nodes"]))
    +
    +    # Combine the full triangles and ghost triangles
     
         gtri =  take(submesh["ghost_triangles"],(1, 2, 3),1)
         triangles = concatenate((submesh["full_triangles"], gtri))
     
    -    # renumber the boundary edges to correspond to the new
    +    # Renumber the boundary edges to correspond to the new
         # triangle numbering
     
         GAboundary = {}
         for b in submesh["full_boundary"]:
    -        GAboundary[b[0]-lower_t,b[1]]=submesh["full_boundary"][b]
    -
    -    # make note of the new triangle numbers, including the ghost
    +        GAboundary[b[0]-lower_t,b[1]] = submesh["full_boundary"][b]
    +
    +    # Make note of the new triangle numbers, including the ghost
         # triangles
     
    @@ -187,10 +191,10 @@
             index[submesh["ghost_triangles"][i][0]] = i+upper_t-lower_t
     
    -    # change the node numbering (and update the numbering in the
    +    # Change the node numbering (and update the numbering in the
         # triangles)
     
         [GAnodes, GAtriangles] = build_local_GA(nodes, triangles)
     
    -    # extract the local quantities
    +    # Extract the local quantities
     
         quantities ={}
    @@ -202,14 +206,16 @@
             quantities[k][Nf:Nf+Ng] = submesh["ghost_quan"][k]
     
    -    # change the communication pattern into a form needed by
    -    # the parallel_advection.py file
    +    # Change the communication pattern into a form needed by
    +    # the parallel_adv
     
         gcommun = submesh["ghost_commun"]
         fcommun = submesh["full_commun"]
    -    [ghost_rec, full_send] = build_local_commun(index, gcommun, fcommun, nproc)
    -
    -    # clean up before exiting
    +    [ghost_rec, full_send] = \
    +                build_local_commun(index, gcommun, fcommun, nproc)
    +
    +    # Clean up before exiting
     
         del(index)
     
    -    return GAnodes, GAtriangles, GAboundary, quantities, ghost_rec, full_send
    +    return GAnodes, GAtriangles, GAboundary, quantities, ghost_rec, \
    +           full_send
  • inundation/parallel/build_submesh.py

    r2105 → r2130

    @@ -13,6 +13,10 @@
     
     import sys
    -from mesh import *
    -from Numeric import *
    +
    +from Numeric import zeros, Float, Int, concatenate, \
    +     reshape, arrayrange, take, nonzero
    +
    +from mesh import Mesh
    +
     
     #########################################################
    @@ -41,5 +45,5 @@
     def submesh_full(nodes, triangles, boundary, triangles_per_proc):
     
    -    # initialise
    +    # Initialise
     
         tlower = 0
    @@ -50,11 +54,12 @@
         boundary_list = []
         submesh = {}
    -    tsubnodes = concatenate((reshape(arrayrange(nnodes),(nnodes,1)), nodes), 1)
    -
    -    # loop over processors
    +    node_range = reshape(arrayrange(nnodes),(nnodes,1))
    +    tsubnodes = concatenate((node_range, nodes), 1)
    +
    +    # Loop over processors
     
         for p in range(nproc):
     
    -        # find triangles on processor p
    +        # Find triangles on processor p
     
             tupper = triangles_per_proc[p]+tlower
    @@ -62,5 +67,5 @@
             triangle_list.append(subtriangles)
     
    -        # find the boundary edges on processor p
    +        # Find the boundary edges on processor p
     
             subboundary = {}
    @@ -70,5 +75,5 @@
             boundary_list.append(subboundary)
     
    -        # find nodes in processor p
    +        # Find nodes in processor p
     
             nodemap = zeros(nnodes, 'i')
    @@ -80,9 +85,9 @@
             node_list.append(take(tsubnodes,nonzero(nodemap)))
     
    -        # move to the next processor
    +        # Move to the next processor
     
             tlower = tupper
     
    -    # put the results in a dictionary
    +    # Put the results in a dictionary
     
         submesh["full_nodes"] = node_list
    @@ -90,5 +95,5 @@
         submesh["full_boundary"] = boundary_list
     
    -    # clean up before exiting
    +    # Clean up before exiting
     
         del (nodemap)
    @@ -128,5 +133,5 @@
         ntriangles = len(mesh.triangles)
     
    -    # find the first layer of boundary triangles
    +    # Find the first layer of boundary triangles
     
         trianglemap = zeros(ntriangles, 'i')
    @@ -145,5 +150,5 @@
                     trianglemap[n] = 1
     
    -    # find the second layer of boundary triangles
    +    # Find the second layer of boundary triangles
     
         for t in range(len(trianglemap)):
    @@ -162,5 +167,5 @@
                         trianglemap[n] = 1
     
    -    # build the triangle list and make note of the vertices
    +    # Build the triangle list and make note of the vertices
     
         nodemap = zeros(ncoord, 'i')
    @@ -179,5 +184,5 @@
         subtriangles = take(tsubtriangles, nonzero(trianglemap))
     
    -    # keep a record of the triangle vertices, if they are not already there
    +    # Keep a record of the triangle vertices, if they are not already there
     
         subnodes = []
    @@ -189,5 +194,5 @@
         subnodes = take(tsubnodes, nonzero(nodemap))
     
    -    # clean up before exiting
    +    # Clean up before exiting
     
         del (nodelist)
    @@ -197,5 +202,5 @@
         del (trianglemap)
     
    -    # return the triangles and vertices sitting on the boundary layer
    +    # Return the triangles and vertices sitting on the boundary layer
     
         return subnodes, subtriangles
    @@ -220,5 +225,5 @@
     def ghost_commun_pattern(subtri, p, tri_per_proc):
     
    -    # loop over the ghost triangles
    +    # Loop over the ghost triangles
     
         ghost_commun = zeros((len(subtri), 2), Int)
    @@ -227,5 +232,5 @@
             global_no = subtri[i][0]
     
    -        # find which processor contains the full triangle
    +        # Find which processor contains the full triangle
     
             nproc = len(tri_per_proc)
    @@ -238,5 +243,5 @@
                 sum = sum+tri_per_proc[q]
     
    -        # keep a copy of the neighbour processor number
    +        # Keep a copy of the neighbour processor number
     
             ghost_commun[i] = [global_no, neigh]
    @@ -273,8 +278,8 @@
         full_commun = []
     
    -    # loop over the processor
    +    # Loop over the processor
     
         for p in range(nproc):
     
    -        # loop over the full triangles in the current processor
    +        # Loop over the full triangles in the current processor
             # and build an empty dictionary
    @@ -287,9 +292,9 @@
             tlower = tupper
     
    -    # loop over the processor again
    +    # Loop over the processor again
     
         for p in range(nproc):
     
    -        # loop over the ghost triangles in the current processor,
    +        # Loop over the ghost triangles in the current processor,
             # find which processor contains the corresponding full copy
             # and note that the processor must send updates to this
    @@ -334,28 +339,30 @@
         ghost_commun = []
     
    -    # loop over processors
    +    # Loop over the processors
     
         for p in range(nproc):
     
    -        # find the full triangles in this processor
    +        # Find the full triangles in this processor
     
             tupper = triangles_per_proc[p]+tlower
     
    -        # build the ghost boundary layer
    -
    -        [subnodes, subtri] = ghost_layer(submesh, mesh, p, tupper, tlower)
    +        # Build the ghost boundary layer
    +
    +        [subnodes, subtri] = \
    +                   ghost_layer(submesh, mesh, p, tupper, tlower)
             ghost_triangles.append(subtri)
             ghost_nodes.append(subnodes)
     
    -        # build the communication pattern for the ghost nodes
    -
    -        gcommun = ghost_commun_pattern(subtri, p, triangles_per_proc)
    +        # Build the communication pattern for the ghost nodes
    +
    +        gcommun = \
    +                ghost_commun_pattern(subtri, p, triangles_per_proc)
             ghost_commun.append(gcommun)
     
    -        # move to the next processor
    +        # Move to the next processor
     
             tlower = tupper
     
    -    # record the ghost layer and communication pattern
    +    # Record the ghost layer and communication pattern
     
         submesh["ghost_nodes"] = ghost_nodes
    @@ -363,10 +370,10 @@
         submesh["ghost_commun"] = ghost_commun
     
    -    # build the communication pattern for the full triangles
    +    # Build the communication pattern for the full triangles
     
         full_commun = full_commun_pattern(submesh, triangles_per_proc)
         submesh["full_commun"] = full_commun
     
    -    # return the submesh
    +    # Return the submesh
     
         return submesh
    @@ -397,5 +404,5 @@
         lower = 0
     
    -    # build an empty dictionary to hold the quantites
    +    # Build an empty dictionary to hold the quantites
     
         submesh["full_quan"] = {}
    @@ -405,10 +412,10 @@
             submesh["ghost_quan"][k] = []
     
    -    # loop trough the subdomains
    +    # Loop trough the subdomains
     
         for p in range(nproc):
             upper =   lower+triangles_per_proc[p]
     
    -        # find the global ID of the ghost triangles
    +        # Find the global ID of the ghost triangles
     
             global_id = []
    @@ -417,5 +424,5 @@
                 global_id.append(submesh["ghost_triangles"][p][j][0])
     
    -        # use the global ID to extract the quantites information from
    +        # Use the global ID to extract the quantites information from
             # the full domain
     
    @@ -424,5 +431,6 @@
                 submesh["ghost_quan"][k].append(zeros( (M,3) , Float))
                 for j in range(M):
    -                submesh["ghost_quan"][k][p][j] = quantities[k][global_id[j]]
    +                submesh["ghost_quan"][k][p][j] = \
    +                                               quantities[k][global_id[j]]
     
             lower = upper
    @@ -448,21 +456,23 @@
                       triangles_per_proc):
     
    -    # temporarily build the mesh to find the neighbouring
    +    # Temporarily build the mesh to find the neighbouring
         # triangles
     
         mesh = Mesh(nodes, triangles)
     
    -    # subdivide into non-overlapping partitions
    -
    -    submeshf = submesh_full(nodes, triangles, edges, triangles_per_proc)
    -
    -    # add any extra ghost boundary layer information
    +    # Subdivide into non-overlapping partitions
    +
    +    submeshf = submesh_full(nodes, triangles, edges, \
    +                            triangles_per_proc)
    +
    +    # Add any extra ghost boundary layer information
     
         submeshg = submesh_ghost(submeshf, mesh, triangles_per_proc)
     
    -    # order the quantities information to be the same as the triangle
    +    # Order the quantities information to be the same as the triangle
         # information
     
    -    submesh = submesh_quantities(submeshg, quantities, triangles_per_proc)
    +    submesh = submesh_quantities(submeshg, quantities, \
    +                                 triangles_per_proc)
     
         return submesh
  • inundation/parallel/pmesh_divide.py

    r2095 → r2130

    @@ -5,7 +5,7 @@
     #
     #
    -#  This is only intended as a temporary file, once an
    -# automatic grid partitioner has been incorporated this
    -# file will become redundant.
    +#  The final routine, pmesh_divide_metis, does automatic
    +# grid partitioning. Once testing has finished on this
    +# routine the others should be removed.
     #
     #  Authors: Linda Stals and Matthew Hardy, June 2005
    @@ -16,7 +16,10 @@
     #########################################################
     
    -
    +from os import sep
    +from sys import path
     from math import floor
    -from Numeric import zeros, Float, Int, reshape
    +
    +from Numeric import zeros, Float, Int, reshape, argsort
    +
     
     #########################################################
    @@ -40,14 +43,14 @@
     def reorder(quantities, tri_index, proc_sum):
     
    -    # find the number triangles
    +    # Find the number triangles
     
         N = len(tri_index)
     
    -    # temporary storage area
    +    # Temporary storage area
     
         index = zeros(N, Int)
         q_reord = {}
     
    -    # find the new ordering of the triangles
    +    # Find the new ordering of the triangles
     
         for i in range(N):
    @@ -56,5 +59,5 @@
             index[i] = proc_sum[bin]+bin_off_set
     
    -    # reorder each quantity according to the new ordering
    +    # Reorder each quantity according to the new ordering
     
         for k in quantities:
    @@ -87,5 +90,5 @@
     def pmesh_divide(domain, n_x = 1, n_y = 1):
     
    -    # find the bounding box
    +    # Find the bounding box
     
         x_coord_min = domain.xy_extent[0]
    @@ -94,10 +97,10 @@
         y_coord_max = domain.xy_extent[3]
     
    -    # find the size of each sub-box
    +    # Find the size of each sub-box
     
         x_div = (x_coord_max-x_coord_min)/n_x
         y_div = (y_coord_max-y_coord_min)/n_y
     
    -    # initialise the lists
    +    # Initialise the lists
     
         tri_list = []
    @@ -110,5 +113,5 @@
             tri_list[i] = []
     
    -    # subdivide the triangles depending on which sub-box they sit
    +    # Subdivide the triangles depending on which sub-box they sit
         # in (a triangle sits in the sub-box if its first vectex sits
         # in that sub-box)
    @@ -133,5 +136,5 @@
             tri_index[i] = ([bin, len(tri_list[bin])-1])
     
    -    # find the number of triangles per processor and order the
    +    # Find the number of triangles per processor and order the
         # triangle list so that all of the triangles belonging to
         # processor i are listed before those belonging to processor
    @@ -144,5 +147,5 @@
                 triangles.append(t)
     
    -    # the boundary labels have to changed in accoradance with the
    +    # The boundary labels have to changed in accoradance with the
         # new triangle ordering, proc_sum and tri_index help with this
     
    @@ -151,5 +154,5 @@
             proc_sum[i+1]=proc_sum[i]+triangles_per_proc[i]
     
    -    # relabel the boundary elements to fit in with the new triangle
    +    # Relabel the boundary elements to fit in with the new triangle
         # ordering
     
    @@ -161,5 +164,5 @@
         quantities = reorder(domain.quantities, tri_index, proc_sum)
     
    -    # extract the node list
    +    # Extract the node list
     
         nodes = domain.coordinates.copy()
    @@ -191,5 +194,5 @@
     def pmesh_divide_steve(domain, n_x = 1, n_y = 1):
     
    -    # find the bounding box
    +    # Find the bounding box
         x_coord_min = domain.xy_extent[0]
         x_coord_max = domain.xy_extent[2]
    @@ -198,5 +201,5 @@
     
     
    -    # find the size of each sub-box
    +    # Find the size of each sub-box
     
         x_div = (x_coord_max-x_coord_min)/n_x
    @@ -204,5 +207,6 @@
     
     
    -    # initialise the lists
    +    # Initialise the lists
    +
         tri_list = []
         triangles_per_proc = []
    @@ -214,5 +218,5 @@
             tri_list[i] = []
     
    -    # subdivide the triangles depending on which sub-box they sit
    +    # Subdivide the triangles depending on which sub-box they sit
         # in (a triangle sits in the sub-box if its first vectex sits
         # in that sub-box)
    @@ -221,5 +225,5 @@
         N = domain.number_of_elements
     
    -    #sort by x coordinate of centroid
    -    from Numeric import argsort
    +    # Sort by x coordinate of centroid
    +
         sort_order = argsort(argsort(domain.centroid_coordinates[:,0]))
    @@ -237,8 +241,5 @@
             tri_index[i] = ([bin, len(tri_list[bin])-1])
     
    -    #print tri_list
    -    #print tri_index
    -
    -    # find the number of triangles per processor and order the
    +    # Find the number of triangles per processor and order the
         # triangle list so that all of the triangles belonging to
         # processor i are listed before those belonging to processor
    @@ -252,5 +253,5 @@
     
     
    -    # the boundary labels have to changed in accoradance with the
    +    # The boundary labels have to changed in accoradance with the
         # new triangle ordering, proc_sum and tri_index help with this
     
    @@ -259,5 +260,5 @@
             proc_sum[i+1]=proc_sum[i]+triangles_per_proc[i]
     
    -    # relabel the boundary elements to fit in with the new triangle
    +    # Relabel the boundary elements to fit in with the new triangle
         # ordering
     
    @@ -269,9 +270,9 @@
         quantities = reorder(domain.quantities, tri_index, proc_sum)
     
    -    # extract the node list
    +    # Extract the node list
    +
         nodes = domain.coordinates.copy()
     
    -
    -    # convert the triangle datastructure to be an array typ
    +    # Convert the triangle datastructure to be an array type,
         # this helps with the communication
     
    @@ -289,6 +290,4 @@
     # Divide the mesh using a call to metis, through pymetis.
     
    -from os import sep
    -from sys import path
     
     path.append('..' + sep + 'pymetis')
    @@ -297,10 +296,16 @@
     
     def pmesh_divide_metis(domain, n_procs):
    -    # initialise the lists
    +
    +    # Initialise the lists
         # List, indexed by processor of # triangles.
    +
         triangles_per_proc = []
    +
         # List of lists, indexed by processor of vertex numbers
    +
         tri_list = []
    +
         # List indexed by processor of cumulative total of triangles allocated.
    +
         proc_sum = []
         for i in range(n_procs):
    @@ -310,4 +315,5 @@
     
         # Prepare variables for the metis call
    +
         n_tri = len(domain.triangles)
         if n_procs != 1: #Because metis chokes on it...
    @@ -317,4 +323,5 @@
     
             # The 1 here is for triangular mesh elements.
    +
             edgecut, epart, npart = partMeshNodal(n_tri, n_vert, t_list, 1, n_procs)
             del edgecut
    @@ -327,4 +334,5 @@
             # tri_index maps triangle number -> processor, new triangle number
             # (local to the processor)
    +
             tri_index = {}
             triangles = []
    @@ -337,5 +345,5 @@
             # print triangles_per_proc
     
    -        # order the triangle list so that all of the triangles belonging
    +        # Order the triangle list so that all of the triangles belonging
             # to processor i are listed before those belonging to processor
             # i+1
    @@ -345,5 +353,5 @@
                     triangles.append(t)
     
    -        # the boundary labels have to changed in accoradance with the
    +        # The boundary labels have to changed in accoradance with the
             # new triangle ordering, proc_sum and tri_index help with this
     
    @@ -352,5 +360,5 @@
                 proc_sum[i+1]=proc_sum[i]+triangles_per_proc[i]
     
    -        # relabel the boundary elements to fit in with the new triangle
    +        # Relabel the boundary elements to fit in with the new triangle
             # ordering
     
    @@ -365,5 +373,7 @@
             triangles_per_proc[0] = n_tri
             triangles = domain.triangles.copy()
    +
             # This is essentially the same as a chunk of code from reorder.
    +
             quantities = {}
             for k in domain.quantities:
    @@ -372,7 +382,9 @@
                     quantities[k][i] = domain.quantities[k].vertex_values[i]
     
    -    # extract the node list
    +    # Extract the node list
    +
         nodes = domain.coordinates.copy()
    -    # convert the triangle datastructure to be an array type
    +
    +    # Convert the triangle datastructure to be an array type,
         # this helps with the communication
     
  • inundation/parallel/run_parallel_merimbula.py

    r2050 → r2130

    @@ -16,5 +16,4 @@
     # *) The (new) files that have been added to manage the
     # grid partitioning are
    -#    +) mg2ga.py: read in the test files.
     #    +) pmesh_divide.py: subdivide a pmesh
     #    +) build_submesh.py: build the submeshes on the host
  • inundation/parallel/run_parallel_sw_merimbula.py

    r2090 → r2130

    @@ -16,5 +16,4 @@
     # *) The (new) files that have been added to manage the
     # grid partitioning are
    -#    +) mg2ga.py: read in the test files.
     #    +) pmesh_divide.py: subdivide a pmesh
     #    +) build_submesh.py: build the submeshes on the host
  • inundation/parallel/run_parallel_sw_merimbula_metis.py

    r2108 → r2130

    @@ -19,5 +19,4 @@
     # *) The (new) files that have been added to manage the
     # grid partitioning are
    -#    +) mg2ga.py: read in the test files.
     #    +) pmesh_divide.py: subdivide a pmesh
     #    +) build_submesh.py: build the submeshes on the host
  • inundation/parallel/run_parallel_sw_rectangle.py

    r1697 → r2130

    @@ -16,5 +16,4 @@
     # *) The (new) files that have been added to manage the
     # grid partitioning are
    -#    +) mg2ga.py: read in the test files.
     #    +) pmesh_divide.py: subdivide a pmesh
     #    +) build_submesh.py: build the submeshes on the host