Changeset 1559


Timestamp: Jun 30, 2005, 2:50:49 PM
Author:    linda
Message:   merimbula test run
Location:  inundation/ga/storm_surge/parallel
Files:     5 edited

  • inundation/ga/storm_surge/parallel/build_commun.py (r1555 → r1559)

     #########################################################

    +from Numeric import array, Int, Float
     import logging, logging.config
     logger = logging.getLogger('parallel')
    ...

         print "pypar sending submesh to processor ",p
    -
    +
         # build and send the tagmap for the boundary conditions

    ...

         pypar.send(setup_array, p)
    -
    +
         # send the nodes

    ...

         # send the triangles
    -
    -
    -    pypar.send(submesh["full_triangles"][p], p, use_buffer=True)
    +
    +    pypar.send(array(submesh["full_triangles"][p], Int), p, use_buffer=True)
         pypar.send(submesh["ghost_triangles"][p], p, use_buffer=True)

    ...
         pypar.send(submesh["ghost_commun"][p], p, use_buffer=True)
         pypar.send(flat_full_commun, p, use_buffer=True)
    -

    ...

         no_full_triangles = setup_array[2]
    -    full_triangles = []
    +    submesh_cell["full_triangles"] = []
         for i in range(no_full_triangles):
    -        full_triangles.append([0.0, 0.0, 0.0])
    -    submesh_cell["full_triangles"] = pypar.receive(p, full_triangles)
    +        submesh_cell["full_triangles"].append([0, 0, 0])
    +
    +    full_triangles = pypar.receive(p, array(submesh_cell["full_triangles"], Int))
    +
    +    for i in range(no_full_triangles):
    +        submesh_cell["full_triangles"][i][0] = full_triangles[i][0]
    +        submesh_cell["full_triangles"][i][1] = full_triangles[i][1]
    +        submesh_cell["full_triangles"][i][2] = full_triangles[i][2]

         # receive the ghost triangles
    ...
         # datastructure

    -    [GAnodes, GAtriangles, boundary, ghost_rec, full_send] = build_local_mesh(submesh_cell, lower_t, upper_t, numproc)
    +    [GAnodes, GAtriangles, boundary, ghost_rec, full_send] = \
    +              build_local_mesh(submesh_cell, lower_t, upper_t, \
    +                               numproc)

         return GAnodes, GAtriangles, boundary, ghost_rec, full_send
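
    The substance of this change is that pypar's buffered transfers operate on
    typed Numeric arrays rather than nested Python lists: the sender converts
    the triangle list with array(..., Int), and the receiver supplies a buffer
    of matching shape and type, then copies the values back into its
    list-of-lists structure. The sketch below is not part of the changeset;
    the ranks, sizes and variable names are illustrative only.

        import pypar
        from Numeric import array, Int

        myid = pypar.rank()

        if myid == 0:
            # sender: convert the nested Python list to a typed Numeric array
            triangles = [[0, 1, 2], [1, 2, 3]]
            pypar.send(array(triangles, Int), 1, use_buffer=True)
        elif myid == 1:
            # receiver: supply a buffer of the same shape and type, then copy
            # the received values back into an ordinary list of lists
            buf = array([[0, 0, 0]] * 2, Int)
            received = pypar.receive(0, buf)
            triangles = [list(row) for row in received]

        pypar.finalize()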
  • inundation/ga/storm_surge/parallel/build_submesh.py (r1500 → r1559)

         return submeshg

    -
    -
    -
    -
    -
    -
    +#########################################################
    +#
    +# Extract the submesh that will belong to the
    +# "host processor" (i.e. processor zero)
    +#
    +#  *) See the documentation for build_submesh
    +#
    +# -------------------------------------------------------
    +#
    +#  *) A dictionary containing the full_triangles,
    +# full_nodes, full_boundary, ghost_triangles, ghost_nodes,
    +# ghost_commun and full_commun belonging to processor zero
    +# are returned.
    +#
    +#########################################################
    +def extract_hostmesh(submesh):
    +
    +    submesh_cell = {}
    +    submesh_cell["full_nodes"] = submesh["full_nodes"][0]
    +    submesh_cell["ghost_nodes"] = submesh["ghost_nodes"][0]
    +    submesh_cell["full_triangles"] = submesh["full_triangles"][0]
    +    submesh_cell["ghost_triangles"] = submesh["ghost_triangles"][0]
    +    submesh_cell["full_boundary"] = submesh["full_boundary"][0]
    +    submesh_cell["ghost_commun"] = submesh["ghost_commun"][0]
    +    submesh_cell["full_commun"] = submesh["full_commun"][0]
    +
    +    return submesh_cell
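
    The new extract_hostmesh routine relies on each entry of the submesh
    dictionary being a list indexed by processor number; the host simply keeps
    element 0 of every list instead of MPI-sending that partition to itself.
    A small sketch of that layout follows; it is not part of the changeset,
    and the processor count and empty sub-lists are placeholders.

        from build_submesh import extract_hostmesh

        numprocs = 3   # placeholder processor count

        # build_submesh normally fills these; here each key just gets one
        # empty sub-list per processor to show the indexing that
        # extract_hostmesh expects
        submesh = {}
        for key in ["full_nodes", "ghost_nodes", "full_triangles",
                    "ghost_triangles", "full_boundary", "ghost_commun",
                    "full_commun"]:
            submesh[key] = [[] for p in range(numprocs)]

        hostmesh = extract_hostmesh(submesh)
        # hostmesh[key] is submesh[key][0], i.e. processor zero's partition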
  • inundation/ga/storm_surge/parallel/pmesh_divide.py (r1556 → r1559)

                 triangles.append(t)

    -    # the boundary labels have to changed in accoradance to the
    +    # the boundary labels have to changed in accoradance with the
         # new triangle ordering, proc_sum and tri_index help with this

  • inundation/ga/storm_surge/parallel/run_parallel_merimbula.py (r1558 → r1559)

     # grid partitioning are
     #    +) mg2ga.py: read in the test files.
    +#    +) pmesh_divide.py: subdivide a pmesh
     #    +) build_submesh.py: build the submeshes on the host
     # processor.
    ...
     #
     # *) Things still to do:
    -#    +) Fix host commun: The host processor (processor 0)
    -# currently uses MPI to communicate the submesh to itself.
    -# This is good for testing the communication but is very
    -# inefficient and should be removed.
     #    +) Overlap the communication and computation: The
     # communication routines in build_commun.py should be
    ...
         # read in the test files

    -    filename = 'test-100.tsh'
    -    [nodes, triangles, boundary, triangles_per_proc, rect] = \
    -                       pmesh_divide(filename, Advection_Domain, 2, 1)
    -
    +#    filename = 'test-100.tsh'
    +    filename = 'merimbula_10785.tsh'
    +    nx = 3
    +    ny = 1
    +    if nx*ny != numprocs:
    +        print "WARNING: number of subboxes is not equal to the number of proc"
    +
    +    [nodes, triangles, boundary, triangles_per_proc, rect] =\
    +            pmesh_divide(filename, Advection_Domain, nx, ny)
    +
         # subdivide the mesh
    ...

         # send the mesh partition to the appropriate processor
    -
    -    for p in range(numprocs):
    +
    +    for p in range(1, numprocs):
           send_submesh(submesh, triangles_per_proc, p)

    +    hostmesh = extract_hostmesh(submesh)
    +    [points, vertices, boundary, ghost_recv_dict, full_send_dict] = \
    +             build_local_mesh(hostmesh, 0, triangles_per_proc[0], numprocs)
    +
     # read in the mesh partition that belongs to this
     # processor (note that the information is in the
     # correct form for the GA data structure

    -
    -
    -
    -[points, vertices, boundary, ghost_recv_dict, full_send_dict] = rec_submesh(0)
    +else:
    +    [points, vertices, boundary, ghost_recv_dict, full_send_dict] = \
    +             rec_submesh(0)

     #if myid == 0:
    ...

     T = Transmissive_boundary(domain)
    -domain.set_boundary( {'outflow': T, 'inflow': T, 'inner':T, 'exterior': T} )
    +domain.set_boundary( {'outflow': T, 'inflow': T, 'inner':T, 'exterior': T, 'open':T} )

     class Set_Stage:
    ...
             return self.h*((x>self.x0)&(x<self.x1))

    -domain.set_quantity('stage', Set_Stage(250.0,300.0,1.0))
    +#domain.set_quantity('stage', Set_Stage(250.0,300.0,1.0))
    +domain.set_quantity('stage', Set_Stage(756000.0,756500.0,1.0))

     #---------
    ...
     t0 = time.time()
     domain.visualise = True
    -yieldstep = 1
    -finaltime = 4000
    +#yieldstep = 1
    +yieldstep = 1000
    +finaltime = 50000
    +# finaltime = 4000
     for t in domain.evolve(yieldstep = yieldstep, finaltime = finaltime):
         if myid == 0:
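
    Structurally, the script now branches on the processor rank: processor 0
    partitions the mesh, sends submeshes 1..numprocs-1 with send_submesh, and
    keeps its own piece via extract_hostmesh and build_local_mesh, while every
    other processor blocks in rec_submesh(0). A condensed sketch of that
    host/worker branch follows; it is not part of the changeset, and the
    wrapping helper function and the build_local import path are assumptions.

        import pypar
        from build_commun import send_submesh, rec_submesh
        from build_submesh import extract_hostmesh
        from build_local import build_local_mesh   # module name assumed

        def distribute_mesh(submesh, triangles_per_proc, numprocs, myid):
            # hypothetical helper wrapping the branch introduced in r1559
            if myid == 0:
                # host: ship partitions 1..numprocs-1, keep partition 0 locally
                for p in range(1, numprocs):
                    send_submesh(submesh, triangles_per_proc, p)
                hostmesh = extract_hostmesh(submesh)
                return build_local_mesh(hostmesh, 0, triangles_per_proc[0],
                                        numprocs)
            else:
                # workers: block until processor 0 delivers this partition
                return rec_submesh(0)

        # usage on every processor, once the mesh has been partitioned on 0:
        # points, vertices, boundary, ghost_recv_dict, full_send_dict = \
        #         distribute_mesh(submesh, triangles_per_proc,
        #                         pypar.size(), pypar.rank())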
  • inundation/ga/storm_surge/parallel/run_parallel_mesh.py (r1555 → r1559)

     #
     # *) Things still to do:
    -#    +) Fix host commun: The host processor (processor 0)
    -# currently uses MPI to communicate the submesh to itself.
    -# This is good for testing the communication but is very
    -# inefficient and should be removed.
     #    +) Overlap the communication and computation: The
     # communication routines in build_commun.py should be
    ...
         # send the mesh partition to the appropriate processor

    -    for p in range(numprocs):
    +    for p in range(1, numprocs):
           send_submesh(submesh, triangles_per_proc, p)

    +    hostmesh = extract_hostmesh(submesh)
    +    [points, vertices, boundary, ghost_recv_dict, full_send_dict] = \
    +             build_local_mesh(hostmesh, 0, triangles_per_proc[0], \
    +                              numprocs)
    +
     # read in the mesh partition that belongs to this
     # processor (note that the information is in the
     # correct form for the GA data structure

    -[points, vertices, boundary, ghost_recv_dict, full_send_dict] = rec_submesh(0)
    +else:
    +    [points, vertices, boundary, ghost_recv_dict, full_send_dict] = rec_submesh(0)

     # define the computation domain