Changeset 3184


Timestamp: Jun 20, 2006, 11:24:38 AM (18 years ago)
Author: linda
Message: Changed parallel_advection so the timestep size is updated straight after the flux calculation. Also added results for advection on the Merimbula grid.
Location: inundation/parallel
Files: 6 edited

  • inundation/parallel/build_commun.py

    r3106 → r3184

          pass

    -
      import sys
    - from pypar_dist import pypar
    +
    + #from pypar_dist import pypar
    + import pypar

      from build_local import build_local_mesh
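
    Editorial note on the change above: r3184 drops the bundled pypar_dist wrapper in
    favour of a plain "import pypar". A minimal sketch of how a module could accept
    either layout, assuming at least one of the two packages is installed (the
    fallback itself is not part of the changeset):

        # Illustrative only: prefer a system-wide pypar, fall back to the bundled copy.
        try:
            import pypar                     # what r3184 now assumes is installed
        except ImportError:
            from pypar_dist import pypar     # the pre-r3184 bundled distribution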
  • inundation/parallel/documentation/code/RunParallelAdvection.py

    r3168 → r3184

      #  Mesh partition routines

    - from parallel_meshes import parallel_rectangle
    + from parallel.parallel_meshes import parallel_rectangle

      # Parallel Domain

    - from parallel_advection import Parallel_Domain
    - from parallel_advection import Transmissive_boundary
    + from parallel.parallel_advection import Parallel_Domain
    + from parallel.parallel_advection import Transmissive_boundary

      ############################
    ...
          print 'Reduction Communication time %.2f seconds'\
                %domain.communication_reduce_time
    +
    +
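
    With the imports now qualified by the parallel package, the example script has to
    be run from a location where that package is importable (for instance the
    inundation source root). A hypothetical sketch; the path below is an assumption,
    not part of the changeset:

        import sys
        # Hypothetical path: point Python at the directory that contains the
        # 'parallel' and 'pyvolution' packages so the qualified imports resolve.
        sys.path.insert(0, '/path/to/inundation')

        from parallel.parallel_meshes import parallel_rectangle
        from parallel.parallel_advection import Parallel_Domain, Transmissive_boundary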
  • inundation/parallel/documentation/code/RunParallelMerimbulaMetis.py

    r2909 → r3184

      # pmesh

    - from pmesh2domain import pmesh_to_domain_instance
    - from advection import Domain as Advection_Domain
    - from parallel_advection import Parallel_Domain
    + from pyvolution.pmesh2domain import pmesh_to_domain_instance
    + from pyvolution.advection import Domain as Advection_Domain
    + from parallel.parallel_advection import Parallel_Domain

    - from generic_boundary_conditions import Transmissive_boundary
    + from pyvolution.generic_boundary_conditions import Transmissive_boundary

      # Mesh partition routines

    - from pmesh_divide  import pmesh_divide_metis
    - from build_submesh import build_submesh
    - from build_local   import build_local_mesh
    - from build_commun  import send_submesh, rec_submesh, extract_hostmesh
    + from parallel.pmesh_divide  import pmesh_divide_metis
    + from parallel.build_submesh import build_submesh
    + from parallel.build_local   import build_local_mesh
    + from parallel.build_commun  import send_submesh, rec_submesh, extract_hostmesh

    ...
          # Read in the test files

    -     filename = 'merimbula_10785.tsh'
    +     filename = 'parallel/merimbula_10785.tsh'

          mesh_full = pmesh_to_domain_instance(filename, Advection_Domain)
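
    For orientation, the routines imported above form the usual partition-and-distribute
    pipeline: processor 0 reads and partitions the full Merimbula mesh with Metis, sends
    a submesh to every other processor, and each processor then assembles its local mesh
    before building a Parallel_Domain. The outline below only shows the control flow; it
    is a sketch, and the real call signatures live in the parallel/ modules themselves:

        # Control-flow sketch only; see pmesh_divide.py, build_submesh.py,
        # build_commun.py and build_local.py for the actual interfaces.
        import pypar

        myid = pypar.rank()
        numprocs = pypar.size()

        if myid == 0:
            # read the full mesh, pmesh_divide_metis(...), build_submesh(...),
            # send_submesh(...) to processors 1..numprocs-1, extract_hostmesh(...)
            pass
        else:
            # rec_submesh(...) from processor 0
            pass

        # every processor: build_local_mesh(...), then construct a Parallel_Domain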
  • inundation/parallel/documentation/results.tex

    r3168 → r3184

    - Another way of measuring the performance of the code on a parallel machine is to increase the problem size as the number of processors are increased so that the number of triangles per processor remains roughly the same.  We have node carried out measurements of this kind as we usually have static grids and it is not possible to increase the number of triangles.
    + Another way of measuring the performance of the code on a parallel machine is
    + to increase the problem size as the number of processors is increased, so that
    + the number of triangles per processor remains roughly the same. We have not
    + carried out measurements of this kind, as we usually have static grids and it
    + is not possible to increase the number of triangles.
    +
    + \section{Advection, Merimbula Mesh}
    +
    + We now look at another advection example, except this time the mesh comes from
    + the Merimbula test problem. In other words, we ran the code given in Section
    + \ref{subsec:codeRPMM}, except the final time was reduced to 10000.
    + The results are given in Table \ref{tbl:rpm}.
    + These are good efficiency results, especially considering the structure of the
    + Merimbula mesh. Note that since we are solving an advection problem, the amount
    + of calculation done on each triangle is relatively low; when we move to other
    + problems that involve more calculations we would expect the computation-to-
    + communication ratio to increase and thus an increase in efficiency.
    +
    + \begin{table}
    + \caption{Parallel Efficiency Results for the Advection Problem on the
    +   Merimbula Mesh {\tt N} = 160, {\tt M} = 160.\label{tbl:rpm}}
    + \begin{center}
    + \begin{tabular}{|c|c c|}\hline
    + $n$ & $T_n$ (sec) & $E_n (\%)$ \\\hline
    + 1 & 145.17 &    \\
    + 2 &  77.52 & 94 \\
    + 4 &  41.24 & 88 \\
    + 8 &  22.96 & 79 \\\hline
    + \end{tabular}
    + \end{center}
    + \end{table}
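
    The efficiency column follows the usual definition E_n = T_1/(n T_n), expressed as
    a percentage (assuming the same definition used for the earlier rectangular-mesh
    results). A quick check of the quoted timings:

        # Parallel efficiency E_n = T1/(n*Tn)*100, using the timings from the table.
        T1 = 145.17
        timings = {2: 77.52, 4: 41.24, 8: 22.96}
        for n in sorted(timings.keys()):
            print '%d processors: E = %.0f%%' % (n, 100.0*T1/(n*timings[n]))
        # prints 94%, 88% and 79%, matching the table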
  • inundation/parallel/parallel_advection.py

    r2813 → r3184

          def update_timestep(self, yieldstep, finaltime):

    -         # Calculate local timestep
    -         Domain.update_timestep(self, yieldstep, finaltime)
    +         #LINDA:
    +         # moved the calculation so that it is done after timestep
    +         # has been broadcast
    +
    + #        # Calculate local timestep
    + #        Domain.update_timestep(self, yieldstep, finaltime)

              import time
    ...
              ltimestep[0] = self.timestep
              gtimestep = zeros( 1, Float) # Buffer for results

              pypar.raw_reduce(ltimestep, gtimestep, pypar.MIN, 0)
              pypar.broadcast(gtimestep,0)

              self.timestep = gtimestep[0]

              self.communication_reduce_time += time.time()-t0

    +         # LINDA:
    +         # Now update time stats
    +
    +         # Calculate local timestep
    +         Domain.update_timestep(self, yieldstep, finaltime)

          def update_ghosts(self):
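
    The net effect of this change is to reorder update_timestep: the local timestep
    produced by the flux calculation is first reduced to a global minimum across the
    processors and broadcast, and only then is Domain.update_timestep called, so the
    yieldstep/finaltime bookkeeping uses the agreed global value. A condensed sketch
    of the resulting method (only the calls visible in the diff are shown; the
    zeros/Float names are assumed to come from the module's existing Numeric import):

        def update_timestep(self, yieldstep, finaltime):
            import time
            t0 = time.time()

            # Reduce the local timestep to the global minimum and share it.
            ltimestep = zeros(1, Float)
            ltimestep[0] = self.timestep
            gtimestep = zeros(1, Float)              # buffer for the result
            pypar.raw_reduce(ltimestep, gtimestep, pypar.MIN, 0)
            pypar.broadcast(gtimestep, 0)
            self.timestep = gtimestep[0]

            self.communication_reduce_time += time.time() - t0

            # Only now do the yieldstep/finaltime bookkeeping, using the
            # globally agreed timestep (before r3184 this call came first).
            Domain.update_timestep(self, yieldstep, finaltime)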
  • inundation/parallel/run_parallel_advection_prof.py

    r3120 → r3184

      processor_name = pypar.Get_processor_name()

    - N = 320
    - M = 320
    + N = 80
    + M = 80

      #######################
    ...
      import hotshot
      profiler = hotshot.Profile("hotshot." + str(numprocs) + "." + str(myid) + ".prof")
    - s = '''for t in domain.evolve(yieldstep = 0.1, finaltime = 0.2):
    + s = '''for t in domain.evolve(yieldstep = 0.1, finaltime = 2.0):
        if myid == 0:
          domain.write_time()
      '''
    - #PUT BACK
    - #import hotshot
    - #profiler = hotshot.Profile("hotshot." + str(numprocs) + "." + str(myid) + ".prof")
    - #s = '''for t in domain.evolve(yieldstep = 0.1, finaltime = 10):
    - #  if myid == 0:
    - #    domain.write_time()
    - #'''

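    For reference, a minimal sketch of running the evolve loop under hotshot and
    inspecting the per-processor profile afterwards (domain, myid and numprocs are
    assumed to be set up as in the script above; how the script actually invokes the
    profiler is not shown in the diff):

        import hotshot
        import hotshot.stats

        profile_name = "hotshot." + str(numprocs) + "." + str(myid) + ".prof"
        profiler = hotshot.Profile(profile_name)
        s = '''for t in domain.evolve(yieldstep = 0.1, finaltime = 2.0):
          if myid == 0:
            domain.write_time()
        '''
        profiler.run(s)      # execute the evolve loop under the profiler
        profiler.close()

        # Load and print the statistics for this processor.
        stats = hotshot.stats.load(profile_name)
        stats.sort_stats('cumulative').print_stats(20)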