Changeset 3184
- Timestamp: Jun 20, 2006, 11:24:38 AM
- Location: inundation/parallel
- Files: 6 edited
inundation/parallel/build_commun.py
```diff
--- r3106
+++ r3184
@@ -27,7 +27,8 @@
 pass
 
-
 import sys
-from pypar_dist import pypar
+
+#from pypar_dist import pypar
+import pypar
 
 from build_local import build_local_mesh
```
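The changeset swaps the vendored `from pypar_dist import pypar` for a plain `import pypar`. A minimal sketch (not part of the changeset) of tolerating both layouts instead of hard-coding one; the two module names come from the diff, everything else is illustrative:

```python
# Hedged sketch: accept either the vendored or the installed pypar.
# Module names are taken from the diff; the fallback logic is an
# illustration, not code from r3184.
try:
    from pypar_dist import pypar   # older vendored layout (pre-r3184)
except ImportError:
    import pypar                   # system-wide installation, as used in r3184

if pypar.rank() == 0:
    print 'pypar initialised on %d processors' % pypar.size()
```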
inundation/parallel/documentation/code/RunParallelAdvection.py
```diff
--- r3168
+++ r3184
@@ -25,10 +25,10 @@
 # Mesh partition routines
 
-from parallel_meshes import parallel_rectangle
+from parallel.parallel_meshes import parallel_rectangle
 
 # Parallel Domain
 
-from parallel_advection import Parallel_Domain
-from parallel_advection import Transmissive_boundary
+from parallel.parallel_advection import Parallel_Domain
+from parallel.parallel_advection import Transmissive_boundary
 
 ############################
@@ -117,2 +117,4 @@
 print 'Reduction Communication time %.2f seconds'\
       %domain.communication_reduce_time
+
+
```
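The imports become package-qualified, so the documentation script now assumes the directory containing the `parallel` package is on `sys.path`. A hedged sketch (not from the changeset) of making that explicit; the three-levels-up layout is an assumption inferred from the file's path under `documentation/code/`:

```python
# Illustrative only: resolve the inundation source root from this
# script's location so 'parallel.*' imports work from any working
# directory. The directory layout is an assumption, not from r3184.
import os
import sys

here = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.abspath(os.path.join(here, '..', '..', '..')))

from parallel.parallel_meshes import parallel_rectangle
from parallel.parallel_advection import Parallel_Domain, Transmissive_boundary
```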
inundation/parallel/documentation/code/RunParallelMerimbulaMetis.py
```diff
--- r2909
+++ r3184
@@ -40,16 +40,16 @@
 # pmesh
 
-from pmesh2domain import pmesh_to_domain_instance
-from advection import Domain as Advection_Domain
-from parallel_advection import Parallel_Domain
+from pyvolution.pmesh2domain import pmesh_to_domain_instance
+from pyvolution.advection import Domain as Advection_Domain
+from parallel.parallel_advection import Parallel_Domain
 
-from generic_boundary_conditions import Transmissive_boundary
+from pyvolution.generic_boundary_conditions import Transmissive_boundary
 
 # Mesh partition routines
 
-from pmesh_divide import pmesh_divide_metis
-from build_submesh import build_submesh
-from build_local import build_local_mesh
-from build_commun import send_submesh, rec_submesh, extract_hostmesh
+from parallel.pmesh_divide import pmesh_divide_metis
+from parallel.build_submesh import build_submesh
+from parallel.build_local import build_local_mesh
+from parallel.build_commun import send_submesh, rec_submesh, extract_hostmesh
 
 
@@ -88,5 +88,5 @@
 # Read in the test files
 
-filename = 'merimbula_10785.tsh'
+filename = 'parallel/merimbula_10785.tsh'
 
 mesh_full = pmesh_to_domain_instance(filename, Advection_Domain)
```
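The hard-coded `'parallel/'` prefix assumes the script is launched from the inundation source root. A hedged alternative (illustrative, not from the changeset) that resolves the mesh file against a configurable root:

```python
# Sketch only: INUNDATION_ROOT is a hypothetical environment variable,
# not something defined by the project; the mesh filename comes from
# the diff above.
import os

inundation_root = os.environ.get('INUNDATION_ROOT', '.')
filename = os.path.join(inundation_root, 'parallel', 'merimbula_10785.tsh')
```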
inundation/parallel/documentation/results.tex
```diff
--- r3168
+++ r3184
@@ -78,3 +78,33 @@
 
 
-Another way of measuring the performance of the code on a parallel machine is to increase the problem size as the number of processors are increased so that the number of triangles per processor remains roughly the same. We have node carried out measurements of this kind as we usually have static grids and it is not possible to increase the number of triangles.
+Another way of measuring the performance of the code on a parallel machine is
+to increase the problem size as the number of processors are increased so that
+the number of triangles per processor remains roughly the same. We have node
+carried out measurements of this kind as we usually have static grids and it
+is not possible to increase the number of triangles.
+
+\section{Advection, Merimbula Mesh}
+
+We now look at another advection example, except this time the mesh comes from
+the Merimbula test problem. Inother words, we ran the code given in Section
+\ref{subsec:codeRPMM}, except the final time was reduced to 10000
+(\code{\label{subsec:codeRPMM}). The results are given in Table \ref{tbl:rpm}.
+These are good efficiency results, especially considering the structure of the
+Merimbula mesh. Note that since we are solving an advection problem the amount
+of calculation done on each triangle is relatively low, when we more to other
+problems that involve more calculations we would expect the computation to
+communication ratio to increase and thus get an increase in efficiency.
+
+\begin{table}
+\caption{Parallel Efficiency Results for the Advection Problem on the
+Merimbula Mesh {\tt N} = 160, {\tt M} = 160.\label{tbl:rpm}}
+\begin{center}
+\begin{tabular}{|c|c c|}\hline
+$n$ & $T_n$ (sec) & $E_n (\%)$ \\\hline
+1 &145.17 & \\
+2 &77.52 & 94 \\
+4 & 41.24 & 88 \\
+8 & 22.96 & 79 \\\hline
+\end{tabular}
+\end{center}
+\end{table}
```
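The efficiency column of the new table is consistent with the standard definition $E_n = T_1/(n\,T_n)$; that formula is not quoted in the diff, but it reproduces the tabulated values exactly. A short check:

```python
# Reproduce the E_n column of Table tbl:rpm from the timings in the diff.
# The timings are from the changeset; the definition E_n = T_1/(n*T_n)
# is the usual one and is assumed, not quoted from results.tex.
timings = {1: 145.17, 2: 77.52, 4: 41.24, 8: 22.96}
t1 = timings[1]
for n in sorted(timings):
    efficiency = 100.0 * t1 / (n * timings[n])
    print '%d processors: T_n = %6.2f s, E_n = %2.0f%%' % (n, timings[n], efficiency)
# prints 94%, 88%, 79% for n = 2, 4, 8, matching the table
```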
inundation/parallel/parallel_advection.py
```diff
--- r2813
+++ r3184
@@ -67,6 +67,10 @@
     def update_timestep(self, yieldstep, finaltime):
 
-        # Calculate local timestep
-        Domain.update_timestep(self, yieldstep, finaltime)
+        #LINDA:
+        # moved the calculation so that it is done after timestep
+        # has been broadcast
+
+        # # Calculate local timestep
+        # Domain.update_timestep(self, yieldstep, finaltime)
 
         import time
@@ -78,12 +82,17 @@
         ltimestep[0] = self.timestep
         gtimestep = zeros( 1, Float) # Buffer for results
 
         pypar.raw_reduce(ltimestep, gtimestep, pypar.MIN, 0)
         pypar.broadcast(gtimestep,0)
 
         self.timestep = gtimestep[0]
 
         self.communication_reduce_time += time.time()-t0
 
+        # LINDA:
+        # Now update time stats
+
+        # Calculate local timestep
+        Domain.update_timestep(self, yieldstep, finaltime)
 
     def update_ghosts(self):
```
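The change reorders `update_timestep` so that per-processor time statistics are only updated after every processor has agreed on the global timestep. The core pattern is a MIN-reduction onto processor 0 followed by a broadcast. A standalone sketch of that pattern, assuming the same pypar and Numeric-style APIs that appear in the diff (`pypar.raw_reduce`, `pypar.broadcast`, `zeros`, `Float`); the `local_dt` value is a stand-in for `self.timestep`:

```python
# Hedged sketch of the reduce-then-broadcast timestep agreement.
import pypar
from Numeric import zeros, Float

local_dt = zeros(1, Float)
local_dt[0] = 0.01 * (pypar.rank() + 1)   # each processor proposes a timestep

global_dt = zeros(1, Float)               # buffer for the reduced result
pypar.raw_reduce(local_dt, global_dt, pypar.MIN, 0)  # min onto processor 0
pypar.broadcast(global_dt, 0)             # every processor adopts the minimum

# Only now is it safe to update per-processor time statistics, which is
# why the changeset moves Domain.update_timestep after this point.
print 'P%d: global timestep %f' % (pypar.rank(), global_dt[0])
```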
inundation/parallel/run_parallel_advection_prof.py
```diff
--- r3120
+++ r3184
@@ -55,6 +55,6 @@
 processor_name = pypar.Get_processor_name()
 
-N = 320
-M = 320
+N = 80
+M = 80
 
 #######################
@@ -110,15 +110,8 @@
 import hotshot
 profiler = hotshot.Profile("hotshot." + str(numprocs) + "." + str(myid) + ".prof")
-s = '''for t in domain.evolve(yieldstep = 0.1, finaltime = 0.2):
+s = '''for t in domain.evolve(yieldstep = 0.1, finaltime = 2.0):
     if myid == 0:
         domain.write_time()
 '''
-#PUT BACK
-#import hotshot
-#profiler = hotshot.Profile("hotshot." + str(numprocs) + "." + str(myid) + ".prof")
-#s = '''for t in domain.evolve(yieldstep = 0.1, finaltime = 10):
-#    if myid == 0:
-#        domain.write_time()
-#'''
 
 
```
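Each processor writes its profile to `hotshot.<numprocs>.<myid>.prof`. An illustrative follow-up (not in the changeset) for inspecting one of those files with the Python 2 standard library's `hotshot.stats` module; the filename pattern is taken from the diff:

```python
# Sketch: load and summarise one per-processor hotshot profile.
import hotshot.stats

stats = hotshot.stats.load('hotshot.4.0.prof')   # e.g. numprocs=4, myid=0
stats.strip_dirs()
stats.sort_stats('time', 'calls')
stats.print_stats(20)                            # twenty most expensive functions
```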