Changeset 1563
- Timestamp: Jun 30, 2005, 5:41:44 PM
- Location: inundation/ga/storm_surge
- Files: 1 added, 8 edited
inundation/ga/storm_surge/parallel/build_local.py
--- r1549
+++ r1563

     for c in ghostc:
         if not ghost_recv.has_key(c[1]):
-            ghost_recv[c[1]] = [0, 0, 0]
+            ghost_recv[c[1]] = [0, 0]
             ghost_recv[c[1]][0] = []
             ghost_recv[c[1]][1] = []
-            ghost_recv[c[1]][2] = []
         ghost_recv[c[1]][0].append(index[c[0]])
         ghost_recv[c[1]][1].append(c[0])
-        ghost_recv[c[1]][2].append(0.0)
...
     # build a temporary copy of the full_send dictionary
...
         neigh_commun = tmp_send[neigh]
         neigh_commun.sort(sort_tup)
-        full_send[neigh] = [0, 0, 0]
+        full_send[neigh] = [0, 0]
         full_send[neigh][0] = []
         full_send[neigh][1] = []
-        full_send[neigh][2] = []
         for t in neigh_commun:
             full_send[neigh][0].append(t[1])
             full_send[neigh][1].append(t[0])
-            full_send[neigh][2].append(0.0)
...
         full_send[key][0] = array(full_send[key][0],Int)
         full_send[key][1] = array(full_send[key][1],Int)
-        full_send[key][2] = array(full_send[key][2],Float)
...
     for key in ghost_recv:
         ghost_recv[key][0] = array(ghost_recv[key][0],Int)
         ghost_recv[key][1] = array(ghost_recv[key][1],Int)
-        ghost_recv[key][2] = array(ghost_recv[key][2],Float)
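The point of these hunks: each entry of the full_send and ghost_recv dictionaries shrinks from a triple [local_ids, global_ids, buffer] to a pair [local_ids, global_ids]. The Float communication buffer is no longer preallocated here; the parallel domain constructors append it instead, sized to the number of conserved quantities (see the parallel_advection.py and parallel_shallow_water.py hunks below). A minimal sketch of the resulting layout, assuming the Numeric-era array/zeros used throughout this changeset (the example ids are made up):

    # Post-changeset layout of one send-dictionary entry (illustrative only).
    from Numeric import array, zeros, Int, Float

    full_send = {}
    full_send[1] = [array([4, 7, 9], Int),        # local ids of full cells sent to proc 1
                    array([104, 107, 109], Int)]  # matching global ids

    # Later, the domain constructor appends the communication buffer:
    # one column per conserved quantity (nsys = 1 for advection,
    # nsys = 3 for shallow water), one row per cell to send.
    nsys = 1
    for key in full_send:
        n = full_send[key][0].shape[0]
        full_send[key].append(zeros((n, nsys), Float))  # becomes full_send[key][2]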
inundation/ga/storm_surge/parallel/parallel_advection.py
--- r1558
+++ r1563

 Ole Nielsen, Stephen Roberts, Duncan Gray, Christopher Zoppou
-Geoscience Australia, 2004
+Geoscience Australia, 2004-2005
 """
...
         self.processor = pypar.rank()
         self.numproc = pypar.size()
-        #print 'Processor %d'%self.processor
-        #velocity = [(self.processor+1),0.0]
-
-        #print 'velocity',velocity

         Advection_Domain.__init__(self, coordinates, vertices, boundary,
...
         self.numproc = pypar.size()

+        # Setup Communication Buffers
+        self.nsys = 1
+        for key in full_send_dict:
+            buffer_shape = full_send_dict[key][0].shape[0]
+            full_send_dict[key].append(zeros( (buffer_shape,self.nsys) ,Float))
+
+        for key in ghost_recv_dict:
+            buffer_shape = ghost_recv_dict[key][0].shape[0]
+            ghost_recv_dict[key].append(zeros( (buffer_shape,self.nsys) ,Float))
+
         self.full_send_dict = full_send_dict
-        # for key in self.full_send_dict:
-        #     self.full_send_dict[key][0] = array(self.full_send_dict[key][0],Int)
-        #     self.full_send_dict[key][2] = array(self.full_send_dict[key][2],Float)
-
         self.ghost_recv_dict = ghost_recv_dict
-        # for key in self.ghost_recv_dict:
-        #     self.ghost_recv_dict[key][0] = array(self.ghost_recv_dict[key][0],Int)
-        #     self.ghost_recv_dict[key][2] = array(self.ghost_recv_dict[key][2],Float)

         self.communication_time = 0.0
         self.communication_reduce_time = 0.0

-        #print self.full_send_dict
-        #print self.ghost_recv_dict
-
     def check_integrity(self):
         Advection_Domain.check_integrity(self)
...
         msg = 'Will need to check global and local numbering'
         assert self.conserved_quantities[0] == 'stage', msg

     def update_timestep(self, yieldstep, finaltime):
...
         self.communication_reduce_time += time.time()-t0

     def update_ghosts(self):
-
-        self.update_ghosts_second()
-
-    def update_ghosts_second(self):

         # We must send the information from the full cells and
...
                 Xout = self.full_send_dict[send_proc][2]

-                N = len(Xout)
+                N = len(Idf)

                 #==============================
                 # Original python Code
                 for i in range(N):
-                    Xout[i] = stage_cv[Idf[i]]
+                    Xout[i,0] = stage_cv[Idf[i]]
                 #==============================
...
                 code1 = """
                     for (int i=0; i<N ; i++){
-                        Xout(i) = stage_cv(Idf(i));
+                        Xout(i,0) = stage_cv(Idf(i));
                     }
                 """
...
                 X = pypar.receive(iproc,X)
-                N = len(X)
+                N = len(Idg)

                 #LINDA: had problems getting C code to work
...
                 # Origin Python Code
                 for i in range(N):
-                    stage_cv[Idg[i]] = X[i]
+                    stage_cv[Idg[i]] = X[i,0]
                 #===========================
...
                 code2 = """
                     for (int i=0; i<N; i++){
-                        stage_cv(Idg(i)) = X(i);
+                        stage_cv(Idg(i)) = X(i,0);
                     }
                 """
...
             #weave.inline(code3, ['stage_cv','Idg','Idf','N'],
             #             type_converters = converters.blitz, compiler='gcc');
-
-        self.communication_time += time.time()-t0
-
-        # if self.ghosts is not None:
-        #     stage_cv = self.quantities['stage'].centroid_values
-        #     for triangle in self.ghosts:
-        #         stage_cv[triangle] = stage_cv[self.ghosts[triangle]]
-
-    def update_ghosts_first(self):
-
-        # We must send the information from the full cells and
-        # receive the information for the ghost cells
-        # We have a dictionary of lists with ghosts expecting updates from
-        # the separate processors
-
-        import weave
-        from weave import converters
-
-        import time
-        t0 = time.time()
-
-        stage_cv = self.quantities['stage'].centroid_values
-
-        for send_proc in self.full_send_dict:
-            if send_proc != self.processor:
-                Idf  = self.full_send_dict[send_proc][0]
-                Xout = self.full_send_dict[send_proc][1]
-                N = len(Xout)
-
-                #==============================
-                # Original python Code
-                for i in range(N):
-                    Xout[i] = stage_cv[Idf[i]]
-                #==============================
-
-                # code1 = """
-                #     for (int i=0; i<N ; i++){
-                #         Xout(i) = stage_cv(Idf(i));
-                #     }
-                # """
-                # weave.inline(code1, ['stage_cv','Idf','Xout','N'],
-                #              type_converters = converters.blitz, compiler='gcc');
-
-                pypar.send(Xout,send_proc)
-
-        #Receive data from the iproc processor
-        for recv_proc in self.ghost_recv_dict:
-            if recv_proc != self.processor:
-                Idg = self.ghost_recv_dict[recv_proc][0]
-                X   = self.ghost_recv_dict[recv_proc][1]
-
-                X = pypar.receive(recv_proc,X)
-                N = len(X)
-
-                #===========================
-                # Origin Python Code
-                for i in range(N):
-                    stage_cv[Idg[i]] = X[i]
-                #===========================
-
-                # code2 = """
-                #     for (int i=0; i<N; i++){
-                #         stage_cv(Idg(i)) = X(i);
-                #     }
-                # """
-                # weave.inline(code2, ['stage_cv','Idg','X','N'],
-                #              type_converters = converters.blitz, compiler='gcc');
-
-        #local update of ghost cells
-        iproc = self.processor
-        if self.full_send_dict.has_key(iproc):
-            Idf = self.full_send_dict[iproc][0]
-            #print Idf
-            Idg = self.ghost_recv_dict[iproc][0]
-            N = len(Idg)
-            #print Idg
-
-            #======================================
-            # Original python loop
-            for i in range(N):
-                #print i,Idg[i],Idf[i]
-                stage_cv[Idg[i]] = stage_cv[Idf[i]]
-            #======================================
-
-            # code3 = """
-            #     for (int i=0; i<N; i++){
-            #         stage_cv(Idg(i)) = stage_cv(Idf(i));
-            #     }
-            # """
-            # weave.inline(code3, ['stage_cv','Idg','Idf','N'],
-            #              type_converters = converters.blitz, compiler='gcc');

         self.communication_time += time.time()-t0
...
             yield(t)
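Taken together, the surviving update_ghosts implements a three-phase exchange: pack full-cell values into the per-neighbour buffer, trade buffers with each neighbour, then unpack into the local ghost cells (ghosts owned by the same processor are copied directly, with no MPI involved). A simplified sketch of the cycle with nsys = 1, using the same pypar calls as the changeset; it deliberately ignores the send/receive ordering the real method uses to pair up processors:

    import pypar

    def update_ghosts_sketch(domain):
        # Illustrative only: domain is assumed to carry the full_send_dict /
        # ghost_recv_dict set up in __init__ above (buffers appended at [2]).
        stage_cv = domain.quantities['stage'].centroid_values
        me = domain.processor

        # Phase 1: pack and ship full-cell values to every neighbour.
        for send_proc in domain.full_send_dict:
            if send_proc != me:
                Idf  = domain.full_send_dict[send_proc][0]
                Xout = domain.full_send_dict[send_proc][2]
                for i in range(len(Idf)):
                    Xout[i,0] = stage_cv[Idf[i]]
                pypar.send(Xout, send_proc)

        # Phase 2: receive each neighbour's buffer and scatter it
        # into this processor's ghost cells.
        for recv_proc in domain.ghost_recv_dict:
            if recv_proc != me:
                Idg = domain.ghost_recv_dict[recv_proc][0]
                X   = domain.ghost_recv_dict[recv_proc][2]
                X   = pypar.receive(recv_proc, X)
                for i in range(len(Idg)):
                    stage_cv[Idg[i]] = X[i,0]

        # Phase 3: ghosts whose full cells live on this processor
        # are a straight in-memory copy.
        if domain.full_send_dict.has_key(me):
            Idf = domain.full_send_dict[me][0]
            Idg = domain.ghost_recv_dict[me][0]
            for i in range(len(Idg)):
                stage_cv[Idg[i]] = stage_cv[Idf[i]]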
inundation/ga/storm_surge/parallel/parallel_meshes.py
--- r1520
+++ r1563

-def parallel_rectangular(m_g, n_g, len1_g=1.0, len2_g=1.0, origin_g = (0.0, 0.0)):
+def parallel_rectangle(m_g, n_g, len1_g=1.0, len2_g=1.0, origin_g = (0.0, 0.0)):
...
     boundary = {}
     Idgl = []
-    Xgl = []
     Idfl = []
-    Xfl = []
     Idgr = []
-    Xgr = []
     Idfr = []
-    Xfr = []

     full_send_dict = {}
...
     if numproc==1:
-        #print Idfl
-        #print Idfr
         Idfl.extend(Idfr)
-        #print Idfl
         Idgr.extend(Idgl)
         Idfl = array(Idfl,Int)
         Idgr = array(Idgr,Int)
-        Xf = zeros(Idfl.shape,Float)
-        Xg = zeros(Idgr.shape,Float)
-        full_send_dict[processor] = [Idfl, Idfl, Xf]
-        ghost_recv_dict[processor] = [Idgr, Idgr, Xg]
+        full_send_dict[processor] = [Idfl, Idfl]
+        ghost_recv_dict[processor] = [Idgr, Idgr]
     elif numproc == 2:
         Idfl.extend(Idfr)
         Idgr.extend(Idgl)
-        #print Idfl
         Idfl = array(Idfl,Int)
         Idgr = array(Idgr,Int)
-        Xf = zeros(Idfl.shape,Float)
-        Xg = zeros(Idgr.shape,Float)
-        full_send_dict[(processor-1)%numproc] = [Idfl, Idfl, Xf]
-        ghost_recv_dict[(processor-1)%numproc] = [Idgr, Idgr, Xg]
+        full_send_dict[(processor-1)%numproc] = [Idfl, Idfl]
+        ghost_recv_dict[(processor-1)%numproc] = [Idgr, Idgr]
     else:
         Idfl = array(Idfl,Int)
         Idgl = array(Idgl,Int)
-        Xfl = zeros(Idfl.shape,Float)
-        Xgl = zeros(Idgl.shape,Float)

         Idfr = array(Idfr,Int)
         Idgr = array(Idgr,Int)
-        Xfr = zeros(Idfr.shape,Float)
-        Xgr = zeros(Idgr.shape,Float)
-
-        full_send_dict[(processor-1)%numproc] = [Idfl, Idfl, Xfl]
-        ghost_recv_dict[(processor-1)%numproc] = [Idgl, Idgl, Xgl]
-        full_send_dict[(processor+1)%numproc] = [Idfr, Idfr, Xfr]
-        ghost_recv_dict[(processor+1)%numproc] = [Idgr, Idgr, Xgr]
+
+        full_send_dict[(processor-1)%numproc] = [Idfl, Idfl]
+        ghost_recv_dict[(processor-1)%numproc] = [Idgl, Idgl]
+        full_send_dict[(processor+1)%numproc] = [Idfr, Idfr]
+        ghost_recv_dict[(processor+1)%numproc] = [Idgr, Idgr]

     return points, elements, boundary, full_send_dict, ghost_recv_dict
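With the buffers gone, each dictionary entry parallel_rectangle builds is just the pair [ids, ids] (local and global ids coincide for this structured mesh), keyed by the neighbour's processor number in a ring. A small sketch of the general (numproc > 2) pattern, with made-up edge ids:

    # Illustrative ring topology for a strip decomposition (numproc > 2).
    from Numeric import array, Int

    processor, numproc = 1, 4
    Idfl = array([0, 1], Int);   Idgl = array([100, 101], Int)  # left edge: full / ghost
    Idfr = array([8, 9], Int);   Idgr = array([108, 109], Int)  # right edge: full / ghost

    full_send_dict  = {}
    ghost_recv_dict = {}
    full_send_dict[(processor-1) % numproc]  = [Idfl, Idfl]  # my left full cells -> left neighbour
    ghost_recv_dict[(processor-1) % numproc] = [Idgl, Idgl]  # left neighbour's cells -> my left ghosts
    full_send_dict[(processor+1) % numproc]  = [Idfr, Idfr]  # my right full cells -> right neighbour
    ghost_recv_dict[(processor+1) % numproc] = [Idgr, Idgr]  # right neighbour's cells -> my right ghosts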
inundation/ga/storm_surge/parallel/parallel_shallow_water.py
--- r1558
+++ r1563

     def __init__(self, coordinates, vertices, boundary = None,
-                 full_send_dict = None, ghost_recv_dict = None,
-                 velocity = None):
+                 full_send_dict = None, ghost_recv_dict = None):

         self.processor = pypar.rank()
         self.numproc = pypar.size()
-        #print 'Processor %d'%self.processor
-        #velocity = [(self.processor+1),0.0]
-
-        #print 'velocity',velocity
-
-        Advection_Domain.__init__(self, coordinates, vertices, boundary,
-                                  velocity = velocity)
+
+        Shallow_Water_Domain.__init__(self, coordinates, vertices, boundary)

         N = self.number_of_elements
...
         self.numproc = pypar.size()

+        # Setup Communication Buffers
+        self.nsys = 3
+        for key in full_send_dict:
+            buffer_shape = full_send_dict[key][0].shape[0]
+            full_send_dict[key].append(zeros( (buffer_shape,self.nsys) ,Float))
+
+        for key in ghost_recv_dict:
+            buffer_shape = ghost_recv_dict[key][0].shape[0]
+            ghost_recv_dict[key].append(zeros( (buffer_shape,self.nsys) ,Float))
+
         self.full_send_dict = full_send_dict
-        # for key in self.full_send_dict:
-        #     self.full_send_dict[key][0] = array(self.full_send_dict[key][0],Int)
-        #     self.full_send_dict[key][2] = array(self.full_send_dict[key][2],Float)
-
         self.ghost_recv_dict = ghost_recv_dict
-        # for key in self.ghost_recv_dict:
-        #     self.ghost_recv_dict[key][0] = array(self.ghost_recv_dict[key][0],Int)
-        #     self.ghost_recv_dict[key][2] = array(self.ghost_recv_dict[key][2],Float)

         self.communication_time = 0.0
         self.communication_reduce_time = 0.0

-        #print self.full_send_dict
-        #print self.ghost_recv_dict
-
     def check_integrity(self):
-        Advection_Domain.check_integrity(self)
+        Shallow_Water_Domain.check_integrity(self)

         msg = 'Will need to check global and local numbering'
         assert self.conserved_quantities[0] == 'stage', msg
+        assert self.conserved_quantities[1] == 'xmomentum', msg
+        assert self.conserved_quantities[2] == 'ymomentum', msg
...
         # Calculate local timestep
-        Advection_Domain.update_timestep(self, yieldstep, finaltime)
+        Shallow_Water_Domain.update_timestep(self, yieldstep, finaltime)

         import time
...
         self.communication_reduce_time += time.time()-t0

     def update_ghosts(self):
-
-        self.update_ghosts_second()
-
-    def update_ghosts_second(self):

         # We must send the information from the full cells and
...
         t0 = time.time()

-        stage_cv = self.quantities['stage'].centroid_values
+        stage_cv     = self.quantities['stage'].centroid_values
+        xmomentum_cv = self.quantities['xmomentum'].centroid_values
+        ymomentum_cv = self.quantities['ymomentum'].centroid_values

         # update of non-local ghost cells
...
         for send_proc in self.full_send_dict:
             if send_proc != iproc:
-                # LINDA:
-                # now store full as local id, global_id, value

                 Idf = self.full_send_dict[send_proc][0]
                 Xout = self.full_send_dict[send_proc][2]

-                N = len(Xout)
-
-                #==============================
-                # Original python Code
+                N = len(Idf)
+
                 for i in range(N):
-                    Xout[i] = stage_cv[Idf[i]]
-                #==============================
-
-                #LINDA:
-                #could not get the code below to work, kept on complaining
-                #about error: no match for call to `(py::list) (int&)'
-
-                code1 = """
-                    for (int i=0; i<N ; i++){
-                        Xout(i) = stage_cv(Idf(i));
-                    }
-                """
-                #weave.inline(code1, ['stage_cv','Idf','Xout','N'],
-                #             type_converters = converters.blitz, compiler='gcc');
+                    Xout[i,0] = stage_cv[Idf[i]]
+                    Xout[i,1] = xmomentum_cv[Idf[i]]
+                    Xout[i,2] = ymomentum_cv[Idf[i]]

                 pypar.send(Xout,send_proc)
...
             if self.ghost_recv_dict.has_key(iproc):

-                # LINDA:
-                # now store ghost as local id, global id, value
                 Idg = self.ghost_recv_dict[iproc][0]
                 X = self.ghost_recv_dict[iproc][2]

                 X = pypar.receive(iproc,X)
-                N = len(X)
-
-                #LINDA: had problems getting C code to work
-
-                #===========================
-                # Origin Python Code
+                N = len(Idg)
+
                 for i in range(N):
-                    stage_cv[Idg[i]] = X[i]
-                #===========================
-
-                code2 = """
-                    for (int i=0; i<N; i++){
-                        stage_cv(Idg(i)) = X(i);
-                    }
-                """
-                # weave.inline(code2, ['stage_cv','Idg','X','N'],
-                #              type_converters = converters.blitz, compiler='gcc');
+                    stage_cv[Idg[i]] = X[i,0]
+                    xmomentum_cv[Idg[i]] = X[i,1]
+                    ymomentum_cv[Idg[i]] = X[i,2]

         #local update of ghost cells
...
             N = len(Idg)

-            #======================================
-            # Original python loop
             for i in range(N):
-                #print i,Idg[i],Idf[i]
-                stage_cv[Idg[i]] = stage_cv[Idf[i]]
-            #======================================
-
-            code3 = """
-                for (int i=0; i<N; i++){
-                    stage_cv(Idg(i)) = stage_cv(Idf(i));
-                }
-            """
-            #weave.inline(code3, ['stage_cv','Idg','Idf','N'],
-            #             type_converters = converters.blitz, compiler='gcc');
-
-            self.communication_time += time.time()-t0
-
-        # if self.ghosts is not None:
-        #     stage_cv = self.quantities['stage'].centroid_values
-        #     for triangle in self.ghosts:
-        #         stage_cv[triangle] = stage_cv[self.ghosts[triangle]]
-
-    def update_ghosts_first(self):
-        # (removed in full; the body was a verbatim copy of the
-        #  update_ghosts_first deleted from parallel_advection.py above:
-        #  pack stage into the old [1] buffer, pypar.send/receive per
-        #  neighbour, then the local ghost copy, with the weave variants
-        #  commented out)
+                stage_cv[Idg[i]] = stage_cv[Idf[i]]
+                xmomentum_cv[Idg[i]] = xmomentum_cv[Idf[i]]
+                ymomentum_cv[Idg[i]] = ymomentum_cv[Idf[i]]

         self.communication_time += time.time()-t0
...
     def evolve(self, yieldstep = None, finaltime = None):
         """Specialisation of basic evolve method from parent class
...
         #Call basic machinery from parent class
-        for t in Advection_Domain.evolve(self, yieldstep, finaltime):
+        for t in Shallow_Water_Domain.evolve(self, yieldstep, finaltime):

             #Pass control on to outer loop for more specific actions
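The shallow water version is the same machinery with nsys = 3: the three conserved quantities travel in the columns of a single (N, 3) buffer, so each neighbour costs one pypar message per direction instead of three. A sketch of just the packing step, with stand-in arrays and made-up ids:

    from Numeric import array, zeros, Int, Float

    Idf = array([2, 5, 6], Int)          # full-cell ids to send (illustrative)
    stage_cv     = zeros((10,), Float)   # stand-ins for the centroid-value arrays
    xmomentum_cv = zeros((10,), Float)
    ymomentum_cv = zeros((10,), Float)

    nsys = 3
    Xout = zeros((len(Idf), nsys), Float)
    for i in range(len(Idf)):
        Xout[i,0] = stage_cv[Idf[i]]      # column 0: stage
        Xout[i,1] = xmomentum_cv[Idf[i]]  # column 1: xmomentum
        Xout[i,2] = ymomentum_cv[Idf[i]]  # column 2: ymomentum
    # pypar.send(Xout, send_proc)  -- one message carries all three quantities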
inundation/ga/storm_surge/parallel/run_parallel_advection.py
--- r1520
+++ r1563

 from config import g, epsilon
 from Numeric import allclose, array, zeros, ones, Float
-from parallel_advection import *
-from Numeric import array
-from parallel_meshes import *
+from parallel_meshes import parallel_rectangle
+
+from advection import Domain as Advection_Domain
+from parallel_advection import Parallel_Advection_Domain
+from parallel_advection import Transmissive_boundary, Dirichlet_boundary

 import pypar
...
 points, vertices, boundary, full_send_dict, ghost_recv_dict = \
-    parallel_rectangular(N, M, len1_g=1.0)
+    parallel_rectangle(N, M, len1_g=1.0)

 #Create advection domain with direction (1,-1)
-domain = Parallel_Domain(points, vertices, boundary,
+domain = Parallel_Advection_Domain(points, vertices, boundary,
                          full_send_dict, ghost_recv_dict, velocity=[1.0, 0.0])
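These driver scripts are launched with one Python process per mesh partition; with pypar that is typically something like the line below (the exact launcher and flags depend on the local MPI installation):

    mpirun -np 4 python run_parallel_advection.py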
inundation/ga/storm_surge/parallel/run_parallel_merimbula.py
--- r1559
+++ r1563

 sys.path.append('..'+sep+'pyvolution')

-from Numeric import array
+from Numeric import array, zeros, Float
 # pmesh
...
 # mesh partition routines

-from pmesh_divide import pmesh_divide
-from build_submesh import *
-from build_local import *
-from build_commun import *
+from pmesh_divide import pmesh_divide
+from build_submesh import build_submesh, extract_hostmesh
+from build_local import build_local_mesh
+from build_commun import send_submesh, rec_submesh

 # read in the processor information
...
 # filename = 'test-100.tsh'
 filename = 'merimbula_10785.tsh'
-nx = 3
+nx = 2
 ny = 1
 if nx*ny != numprocs:
     print "WARNING: number of subboxes is not equal to the number of proc"

 [nodes, triangles, boundary, triangles_per_proc, rect] =\
     pmesh_divide(filename, Advection_Domain, nx, ny)

 # subdivide the mesh
...
 # send the mesh partition to the appropriate processor

 for p in range(1, numprocs):
     send_submesh(submesh, triangles_per_proc, p)
...
 [points, vertices, boundary, ghost_recv_dict, full_send_dict] = \
     build_local_mesh(hostmesh, 0, triangles_per_proc[0], numprocs)

 # read in the mesh partition that belongs to this
 # processor (note that the information is in the
...
     domain.write_time()

-print 'That took %.2f seconds' %(time.time()-t0)
+if myid == 0:
+    print 'That took %.2f seconds' %(time.time()-t0)
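The control flow around these partitioning calls is a plain master/worker hand-off: processor 0 divides the mesh, posts each submesh to its owner with send_submesh, keeps its own piece via extract_hostmesh, and everyone else blocks in rec_submesh. A condensed sketch of that skeleton using the same helpers this script imports (build_submesh's exact signature is not shown in the diff and is assumed here):

    # Condensed and slightly rearranged from this script; timing omitted.
    if myid == 0:
        [nodes, triangles, boundary, triangles_per_proc, rect] = \
            pmesh_divide(filename, Advection_Domain, nx, ny)
        submesh = build_submesh(nodes, triangles, boundary,
                                triangles_per_proc)          # signature assumed
        for p in range(1, numprocs):
            send_submesh(submesh, triangles_per_proc, p)     # worker p's piece
        hostmesh = extract_hostmesh(submesh)                 # proc 0 keeps its own
        [points, vertices, boundary, ghost_recv_dict, full_send_dict] = \
            build_local_mesh(hostmesh, 0, triangles_per_proc[0], numprocs)
    else:
        [points, vertices, boundary, ghost_recv_dict, full_send_dict] = \
            rec_submesh(0)                                   # blocks on proc 0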
inundation/ga/storm_surge/parallel/run_parallel_sw_merimbula.py
--- r1558
+++ r1563

 # grid partitioning are
 # +) mg2ga.py: read in the test files.
+# +) pmesh_divide.py: subdivide a pmesh
 # +) build_submesh.py: build the submeshes on the host
 #    processor.
...
 #
 # *) Things still to do:
-#  +) Fix host commun: The host processor (processor 0)
-#     currently uses MPI to communicate the submesh to itself.
-#     This is good for testing the communication but is very
-#     inefficient and should be removed.
 #  +) Overlap the communication and computation: The
 #     communication routines in build_commun.py should be
...
 #from shallow_water import Domain

-from advection import Domain
-from parallel_advection import Parallel_Domain as Parallel_Advection_Domain
+from shallow_water import Domain as Shallow_Water_Domain
+from parallel_shallow_water import Parallel_Shallow_Water_Domain

-from generic_boundary_conditions import Transmissive_boundary
 # mesh partition routines
...
 # read in the test files

-filename = 'test-100.tsh'
-[nodes, triangles, boundary, triangles_per_proc, rect] = \
-    pmesh_divide(filename, Domain, 2, 1)
+#filename = 'test-100.tsh'
+filename = 'merimbula_10785.tsh'
+nx = 2
+ny = 1
+if nx*ny != numprocs:
+    print "WARNING: number of subboxes is not equal to the number of proc"
+
+[nodes, triangles, boundary, triangles_per_proc, rect] =\
+    pmesh_divide(filename, Shallow_Water_Domain, nx, ny)

 # subdivide the mesh
...
     # send the mesh partition to the appropriate processor

-    for p in range(numprocs):
+    for p in range(1, numprocs):
         send_submesh(submesh, triangles_per_proc, p)
+
+    hostmesh = extract_hostmesh(submesh)
+    [points, vertices, boundary, ghost_recv_dict, full_send_dict] = \
+        build_local_mesh(hostmesh, 0, triangles_per_proc[0], numprocs)

 # read in the mesh partition that belongs to this
 # processor (note that the information is in the
 # correct form for the GA data structure

-[points, vertices, boundary, ghost_recv_dict, full_send_dict] = rec_submesh(0)
+else:
+    [points, vertices, boundary, ghost_recv_dict, full_send_dict] = \
+        rec_submesh(0)

 #if myid == 0:
...
     print rect

-domain = Parallel_Advection_Domain(points, vertices, boundary,
-                                   full_send_dict = full_send_dict,
-                                   ghost_recv_dict = ghost_recv_dict,
-                                   velocity = [0.1,0.0])
+domain = Parallel_Shallow_Water_Domain(points, vertices, boundary,
+                                       full_send_dict = full_send_dict,
+                                       ghost_recv_dict = ghost_recv_dict)

 domain.initialise_visualiser(rect=rect)
+domain.default_order = 2

 #Boundaries
+from parallel_shallow_water import Transmissive_boundary, Reflective_boundary

 T = Transmissive_boundary(domain)
-domain.set_boundary( {'outflow': T, 'inflow': T, 'inner':T, 'exterior': T} )
+R = Reflective_boundary(domain)
+domain.set_boundary( {'outflow': R, 'inflow': R, 'inner':R, 'exterior': R, 'open':R} )

 class Set_Stage:
...
     return self.h*((x>self.x0)&(x<self.x1))

-domain.set_quantity('stage', Set_Stage(250.0,300.0,1.0))
+#domain.set_quantity('stage', Set_Stage(250.0,300.0,1.0))
+domain.set_quantity('stage', Set_Stage(756000.0,756500.0,4.0))

 #---------
...
 t0 = time.time()
 domain.visualise = True
-yieldstep = 1
-finaltime = 4000
+#yieldstep = 0.1
+#finaltime = 1000
+
+yieldstep = 10
+finaltime = 2000

 for t in domain.evolve(yieldstep = yieldstep, finaltime = finaltime):
     if myid == 0:
         domain.write_time()

-print 'That took %.2f seconds' %(time.time()-t0)
+if myid == 0:
+    print 'That took %.2f seconds' %(time.time()-t0)
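One small pattern worth noting from the tail of this script: set_quantity accepts any callable of the coordinate arrays, so the initial condition is just a vectorised function object. A sketch of the idea, with a constructor filled in since the class body is elided in the diff above (the attribute names follow the visible return line; treat the signature as hypothetical):

    class Set_Stage:
        """Set an initial stage of h between x = x0 and x = x1, else 0."""
        def __init__(self, x0, x1, h):
            self.x0 = x0
            self.x1 = x1
            self.h  = h
        def __call__(self, x, y):
            # (x > x0) & (x < x1) evaluates to 1 inside the band and 0
            # outside, so this returns h inside the band and 0 elsewhere.
            return self.h*((x > self.x0) & (x < self.x1))

    # domain.set_quantity('stage', Set_Stage(756000.0, 756500.0, 4.0))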