Changeset 1575


Timestamp:
Jul 4, 2005, 5:57:46 PM
Author:
steve
Message:

Work in progress: copying the stage quantity from the full domain to the subdomains

Location:
inundation/ga/storm_surge
Files:
12 edited

  • inundation/ga/storm_surge/parallel/build_submesh.py

    r1559 r1575  
    66#########################################################
    77#
    8 # Subdivide the triangles into non-overlapping domains. 
     8# Subdivide the triangles into non-overlapping domains.
    99#
    1010#  *)  The subdivision is controlled by triangles_per_proc.
    1111# The first triangles_per_proc[0] triangles are assigned
    1212# to the first processor, the second triangles_per_proc[1]
    13 # are assigned to the second processor etc. 
     13# are assigned to the second processor etc.
    1414#
    1515#  *) nodes, triangles and boundary contains all of the
     
    3030
    3131    # initialise
    32    
     32
    3333    tlower = 0
    3434    nproc = len(triangles_per_proc)
     
    3838    boundary_list = []
    3939    submesh = {}
    40    
     40
    4141    # loop over processors
    42    
     42
    4343    for p in range(nproc):
    44        
     44
    4545        # find triangles on processor p
    46        
     46
    4747        tupper = triangles_per_proc[p]+tlower
    4848        subtriangles = triangles[tlower:tupper]
     
    5555                subboundary[k]=boundary[k]
    5656        boundary_list.append(subboundary)
    57        
     57
    5858        # find nodes in processor p
    59        
     59
    6060        nodemap = map(lambda n: 0, nodes)
    6161        for t in subtriangles:
     
    6363            nodemap[t[1]]=1
    6464            nodemap[t[2]]=1
    65         subnodes = [] 
     65        subnodes = []
    6666        for i in range(nnodes):
    6767            if nodemap[i] == 1:
     
    7070
    7171        # move to the next processor
    72        
     72
    7373        tlower = tupper
    7474
    7575    # put the results in a dictionary
    76    
     76
    7777    submesh["full_nodes"] = node_list
    7878    submesh["full_triangles"] = triangle_list
     
    8080
    8181    # clean up before exiting
    82    
     82
    8383    del (nodemap)
    8484
     
    113113
    114114def ghost_layer(submesh, mesh, p, tupper, tlower):
    115    
     115
    116116    # find the first layer of boundary triangles
    117    
     117
    118118    trianglemap = map(lambda n: 0, mesh.triangles)
    119119    for t in range(tlower, tupper):
     
    132132
    133133    # find the second layer of boundary triangles
    134    
     134
    135135    for t in range(len(trianglemap)):
    136136        if trianglemap[t]==1:
     
    149149
    150150    # build the triangle list and make note of the vertices
    151    
     151
    152152    nodemap = map(lambda n: 0, mesh.coordinates)
    153153    fullnodes = submesh["full_nodes"][p]
     
    162162
    163163    # keep a record of the triangle vertices, if they are not already there
    164    
     164
    165165    subnodes = []
    166166    for n in fullnodes:
     
    172172
    173173    # clean up before exiting
    174    
     174
    175175    del (nodemap)
    176176    del (trianglemap)
    177177
    178178    # return the triangles and vertices sitting on the boundary layer
    179    
     179
    180180    return subnodes, subtriangles
    181181
     
    200200
    201201    # loop over the ghost triangles
    202    
     202
    203203    ghost_commun = []
    204204    for t in subtri:
     
    206206
    207207        # find which processor contains the full triangle
    208        
     208
    209209        nproc = len(tri_per_proc)
    210210        neigh = nproc-1
     
    217217
    218218        # keep a copy of the neighbour processor number
    219        
     219
    220220        ghost_commun.append([global_no, neigh])
    221        
     221
    222222    return ghost_commun
    223    
     223
    224224#########################################################
    225225#
     
    252252
    253253    # loop over the processor
    254    
     254
    255255    for p in range(nproc):
    256256
    257257        # loop over the full triangles in the current processor
    258258        # and build an empty dictionary
    259        
     259
    260260        fcommun = {}
    261261        tupper = tri_per_proc[p]+tlower
     
    266266
    267267    # loop over the processor again
    268    
     268
    269269    for p in range(nproc):
    270270
     
    273273        # and make note that that processor must send updates to this
    274274        # processor
    275        
     275
    276276        for g in submesh["ghost_commun"][p]:
    277277            neigh = g[1]
     
    280280    return full_commun
    281281
    282    
     282
    283283#########################################################
    284284#
     
    305305
    306306def submesh_ghost(submesh, mesh, triangles_per_proc):
    307    
     307
    308308    nproc = len(triangles_per_proc)
    309309    tlower = 0
     
    311311    ghost_nodes = []
    312312    ghost_commun = []
    313    
     313
    314314    # loop over processors
    315    
     315
    316316    for p in range(nproc):
    317317
    318318        # find the full triangles in this processor
    319        
     319
    320320        tupper = triangles_per_proc[p]+tlower
    321321
    322322        # build the ghost boundary layer
    323        
     323
    324324        [subnodes, subtri] = ghost_layer(submesh, mesh, p, tupper, tlower)
    325325        ghost_triangles.append(subtri)
     
    327327
    328328        # build the communication pattern for the ghost nodes
    329        
     329
    330330        gcommun = ghost_commun_pattern(subtri, p, triangles_per_proc)
    331331        ghost_commun.append(gcommun)
    332332
    333333        # move to the next processor
    334        
     334
    335335        tlower = tupper
    336336
    337337    # record the ghost layer and communication pattern
    338    
     338
    339339    submesh["ghost_nodes"] = ghost_nodes
    340340    submesh["ghost_triangles"] = ghost_triangles
     
    342342
    343343    # build the communication pattern for the full triangles
    344    
     344
    345345    full_commun = full_commun_pattern(submesh, triangles_per_proc)
    346346    submesh["full_commun"] = full_commun
    347347
    348348    # return the submesh
    349    
     349
    350350    return submesh
    351351
     
    369369    # temporarily build the mesh to find the neighbouring
    370370    # triangles
    371    
     371
    372372    mesh = Mesh(nodes, triangles)
    373373
    374374    # subdivide into non-overlapping partitions
    375    
     375
    376376    submeshf = submesh_full(nodes, triangles, edges, triangles_per_proc)
    377377
    378378    # add any extra ghost boundary layer information
    379    
     379
    380380    submeshg = submesh_ghost(submeshf, mesh, triangles_per_proc)
    381381
     
    398398#########################################################
    399399def extract_hostmesh(submesh):
    400    
     400
    401401    submesh_cell = {}
    402402    submesh_cell["full_nodes"] = submesh["full_nodes"][0]
     
    411411
    412412
    413 
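
The build_submesh.py edits above are whitespace-only, but for orientation, here is a rough sketch of the submesh dictionary that submesh_full and submesh_ghost assemble (key names are taken from the diff; the per-processor value layouts are inferred and may not match the real structures exactly):

    # One list entry per processor in each field (inferred from the diff).
    submesh = {
        "full_nodes":      [],  # nodes belonging to each processor's partition
        "full_triangles":  [],  # triangles assigned to each processor
        "ghost_nodes":     [],  # extra nodes forming the ghost boundary layer
        "ghost_triangles": [],  # extra triangles forming the ghost boundary layer
        "ghost_commun":    [],  # per ghost triangle: [global_no, owning processor]
        "full_commun":     [],  # which processors need updates of each full triangle
    }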
  • inundation/ga/storm_surge/parallel/parallel_advection.py

    r1563 r1575  
    2424
    2525from advection import *
    26 Advection_Domain = Domain
    2726from Numeric import zeros, Float, Int, ones, allclose, array
    2827import pypar
    2928
    3029
    31 class Parallel_Advection_Domain(Advection_Domain):
     30class Parallel_Domain(Domain):
    3231
    3332    def __init__(self, coordinates, vertices, boundary = None,
     
    3837        self.numproc   = pypar.size()
    3938
    40         Advection_Domain.__init__(self, coordinates, vertices, boundary,
    41                                   velocity = velocity)
     39        Domain.__init__(self, coordinates, vertices, boundary,
     40                        velocity = velocity)
    4241
    4342        N = self.number_of_elements
     
    6564
    6665    def check_integrity(self):
    67         Advection_Domain.check_integrity(self)
     66        Domain.check_integrity(self)
    6867
    6968        msg = 'Will need to check global and local numbering'
     
    7372
    7473        # Calculate local timestep
    75         Advection_Domain.update_timestep(self, yieldstep, finaltime)
     74        Domain.update_timestep(self, yieldstep, finaltime)
    7675
    7776        import time
     
    8584        gtimestep = zeros( 1, Float) # Buffer for results
    8685
    87 
    88         #LINDA
    8986        pypar.raw_reduce(ltimestep, gtimestep, pypar.MIN, 0)
    9087        pypar.broadcast(gtimestep,0)
    91         #pypar.Barrier()
    9288
    9389        self.timestep = gtimestep[0]
     
    10399        # the separate processors
    104100
    105         import weave
    106         from weave import converters
    107 
    108101        import time
    109102        t0 = time.time()
     
    117110                for send_proc in self.full_send_dict:
    118111                    if send_proc != iproc:
    119                         # LINDA:
    120                         # now store full as local id, global_id, value
    121112
    122113                        Idf  = self.full_send_dict[send_proc][0]
     
    125116                        N = len(Idf)
    126117
    127 
    128                         #==============================
    129                         # Original python Code
    130118                        for i in range(N):
    131119                            Xout[i,0] = stage_cv[Idf[i]]
    132                         #==============================
    133 
    134 
    135                         #LINDA:
    136                         #could not get the code below to work, kept on complaining about error: no match for call to `(py::list) (int&)'
    137 
    138                         code1 = """
    139                         for (int i=0; i<N ; i++){
    140                             Xout(i,0) = stage_cv(Idf(i));
    141                         }
    142                         """
    143                         #weave.inline(code1, ['stage_cv','Idf','Xout','N'],
    144                         #             type_converters = converters.blitz, compiler='gcc');
    145120
    146121                        pypar.send(Xout,send_proc)
     
    159134                    N = len(Idg)
    160135
    161                     #LINDA: had problems getting C code to work
    162136
    163 
    164                     #===========================
    165                     # Origin Python Code
    166137                    for i in range(N):
    167138                        stage_cv[Idg[i]] = X[i,0]
    168                     #===========================
    169139
    170 
    171                     code2 = """
    172                     for (int i=0; i<N; i++){
    173                         stage_cv(Idg(i)) = X(i,0);
    174                     }
    175                     """
    176 #                    weave.inline(code2, ['stage_cv','Idg','X','N'],
    177 #                                 type_converters = converters.blitz, compiler='gcc');
    178140
    179141        #local update of ghost cells
     
    191153            N = len(Idg)
    192154
    193 
    194             #======================================
    195             # Original python loop
    196155            for i in range(N):
    197156                #print i,Idg[i],Idf[i]
    198157                stage_cv[Idg[i]] = stage_cv[Idf[i]]
    199             #======================================
    200158
    201159
    202             code3 = """
    203             for (int i=0; i<N; i++){
    204                 stage_cv(Idg(i)) = stage_cv(Idf(i));
    205             }
    206             """
    207             #weave.inline(code3, ['stage_cv','Idg','Idf','N'],
    208             #                     type_converters = converters.blitz, compiler='gcc');
     160
    209161
    210162        self.communication_time += time.time()-t0
     
    237189
    238190        #Call basic machinery from parent class
    239         for t in Advection_Domain.evolve(self, yieldstep, finaltime):
     191        for t in Domain.evolve(self, yieldstep, finaltime):
    240192
    241193            #Pass control on to outer loop for more specific actions
    242194            yield(t)
    243 
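
The weave.inline experiments removed above accelerated the ghost-cell update; what remains is the plain Python exchange. As a standalone, hedged sketch of that pattern (names follow the diff, but pypar.receive and the handling of the processor's own entry are assumptions, and the real method's send/receive ordering may differ):

    import pypar
    from Numeric import zeros, Float

    def update_ghosts(stage_cv, full_send_dict, ghost_recv_dict, iproc):
        # Send authoritative 'full' values to processors holding them as ghosts.
        for send_proc in full_send_dict:
            if send_proc != iproc:
                Idf = full_send_dict[send_proc][0]   # local ids of full triangles
                Xout = zeros((len(Idf), 1), Float)
                for i in range(len(Idf)):
                    Xout[i, 0] = stage_cv[Idf[i]]
                pypar.send(Xout, send_proc)

        # Receive updated values for ghost triangles owned elsewhere.
        for recv_proc in ghost_recv_dict:
            if recv_proc != iproc:
                Idg = ghost_recv_dict[recv_proc][0]  # local ids of ghost triangles
                X = pypar.receive(recv_proc)         # assumed pypar call
                for i in range(len(Idg)):
                    stage_cv[Idg[i]] = X[i, 0]

        # Ghosts whose full counterpart lives on this processor: copy locally.
        if iproc in full_send_dict and iproc in ghost_recv_dict:
            Idf = full_send_dict[iproc][0]
            Idg = ghost_recv_dict[iproc][0]
            for i in range(len(Idg)):
                stage_cv[Idg[i]] = stage_cv[Idf[i]]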
  • inundation/ga/storm_surge/parallel/parallel_shallow_water.py

    r1563 r1575  
    2525
    2626from shallow_water import *
    27 Shallow_Water_Domain = Domain
    2827from Numeric import zeros, Float, Int, ones, allclose, array
    2928import pypar
    3029
    3130
    32 class Parallel_Shallow_Water_Domain(Shallow_Water_Domain):
     31class Parallel_Domain(Domain):
    3332
    3433    def __init__(self, coordinates, vertices, boundary = None,
     
    3837        self.numproc   = pypar.size()
    3938
    40         Shallow_Water_Domain.__init__(self, coordinates, vertices, boundary)
     39        Domain.__init__(self, coordinates, vertices, boundary)
    4140
    4241        N = self.number_of_elements
     
    6665
    6766    def check_integrity(self):
    68         Shallow_Water_Domain.check_integrity(self)
     67        Domain.check_integrity(self)
    6968
    7069        msg = 'Will need to check global and local numbering'
     
    7877
    7978        # Calculate local timestep
    80         Shallow_Water_Domain.update_timestep(self, yieldstep, finaltime)
     79        Domain.update_timestep(self, yieldstep, finaltime)
    8180
    8281        import time
     
    108107        # the separate processors
    109108
    110         import weave
    111         from weave import converters
    112109
    113110        import time
     
    202199
    203200        #Call basic machinery from parent class
    204         for t in Shallow_Water_Domain.evolve(self, yieldstep, finaltime):
     201        for t in Domain.evolve(self, yieldstep, finaltime):
    205202
    206203            #Pass control on to outer loop for more specific actions
    207204            yield(t)
    208205
    209 
  • inundation/ga/storm_surge/parallel/pmesh_divide.py

    r1565 r1575  
    1818#########################################################
    1919
    20 from pmesh2domain import pmesh_to_domain_instance
     20
    2121from math import floor
    2222
     
    3939#########################################################
    4040
    41 def pmesh_divide(f, Domain, n_x = 1, n_y = 1):
     41def pmesh_divide_linda(f, Domain, n_x = 1, n_y = 1):
    4242
    4343    # read in the pmesh
     
    128128
    129129
    130 def pmesh_divide_steve(f, Domain, n_x = 1, n_y = 1):
    131 
    132     # read in the pmesh
    133 
    134     domain = pmesh_to_domain_instance(f, Domain)
     130def pmesh_divide(domain, n_x = 1, n_y = 1):
    135131
    136132
     
    146142    y_coord_max = domain.xy_extent[3]
    147143
    148     rect = domain.xy_extent
    149144
    150145    # find the size of each sub-box
     
    213208        boundary[proc_sum[t[0]]+t[1], b[1]] = domain.boundary[b]
    214209
     210    # relabel the vertex value of the stage quantity
     211    from Numeric import zeros, Float
     212    Stage = domain.quantities['stage']
     213
     214    #print Stage.vertex_values
     215
     216    stage_relabelled = zeros( Stage.vertex_values.shape, Float)
     217    for i in range(N):
     218        bin = tri_index[i][0]
     219        bin_off_set = tri_index[i][1]
     220        index = proc_sum[bin]+bin_off_set
     221        stage_relabelled[index] = Stage.vertex_values[i]
     222
     223    #print  stage_relabelled
     224
     225
     226    #print  max(Stage.vertex_values - stage_relabelled)
     227
     228
    215229    # extract the node list
    216230    nodes = domain.coordinates.copy()
    217231
    218     return nodes, triangles, boundary,  triangles_per_proc, rect
    219 
    220 
    221 
     232    return nodes, triangles, boundary, triangles_per_proc, stage_relabelled
     233
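
Taken on its own, the relabelling step added to pmesh_divide does the following: triangle i of the original domain ends up at position proc_sum[bin] + bin_off_set in the per-processor ordering, so its stage vertex values have to move to the same slot. A minimal sketch (argument names mirror the diff; tri_index and proc_sum are assumed to have the structure implied there):

    from Numeric import zeros, Float

    def relabel_stage(stage_vertex_values, tri_index, proc_sum):
        # tri_index[i][0] = processor the triangle was assigned to
        # tri_index[i][1] = its offset within that processor's block
        # proc_sum[p]     = assumed cumulative triangle count of processors 0..p-1
        stage_relabelled = zeros(stage_vertex_values.shape, Float)
        for i in range(len(tri_index)):
            bin = tri_index[i][0]
            bin_off_set = tri_index[i][1]
            stage_relabelled[proc_sum[bin] + bin_off_set] = stage_vertex_values[i]
        return stage_relabelled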
  • inundation/ga/storm_surge/parallel/run_advection.py

    r1520 r1575  
    1 import pdb
    2 pdb.set_trace()
     1#import pdb
     2#pdb.set_trace()
    33
    44import sys
     
    4646for t in domain.evolve(yieldstep = 0.1, finaltime = 1.5):
    4747    domain.write_time()
    48     pdb.set_trace()
    49 
     48    #pdb.set_trace()
  • inundation/ga/storm_surge/parallel/run_parallel_advection.py

    r1563 r1575  
    88from parallel_meshes import parallel_rectangle
    99
    10 from advection import Domain as Advection_Domain
    11 from parallel_advection import Parallel_Advection_Domain
     10from advection import Domain
     11from parallel_advection import Parallel_Domain
    1212from parallel_advection import Transmissive_boundary, Dirichlet_boundary
    1313
     
    2828
    2929#Create advection domain with direction (1,-1)
    30 domain = Parallel_Advection_Domain(points, vertices, boundary,
     30domain = Parallel_Domain(points, vertices, boundary,
    3131                         full_send_dict, ghost_recv_dict, velocity=[1.0, 0.0])
    3232
  • inundation/ga/storm_surge/parallel/run_parallel_merimbula.py

    r1563 r1575  
    8686        print "WARNING: number of subboxes is not equal to the number of proc"
    8787
    88     [nodes, triangles, boundary, triangles_per_proc, rect] =\
    89             pmesh_divide(filename, Advection_Domain, nx, ny)
     88    domain_full = pmesh_to_domain_instance(filename, Domain)
     89
     90    nodes, triangles, boundary, triangles_per_proc, stage_relabelled  =\
     91            pmesh_divide(domain_full, nx, ny)
    9092
    9193    # subdivide the mesh
     94    rect = array(domain_full.xy_extent, Float)
    9295
    9396    print rect
  • inundation/ga/storm_surge/parallel/run_parallel_sw_merimbula.py

    r1565 r1575  
    5454#from shallow_water import Domain
    5555
    56 from shallow_water import Domain as Shallow_Water_Domain
    57 from parallel_shallow_water import Parallel_Shallow_Water_Domain
     56from shallow_water import Domain
     57from parallel_shallow_water import Parallel_Domain
    5858
    5959# mesh partition routines
    6060
    61 from pmesh_divide import pmesh_divide, pmesh_divide_steve
     61from pmesh_divide import pmesh_divide
    6262from build_submesh import *
    6363from build_local import *
    6464from build_commun import *
     65from pmesh2domain import pmesh_to_domain_instance
    6566
    6667# read in the processor information
     
    7475rect = zeros( 4, Float) # Buffer for results
    7576
     77class Set_Stage:
     78    """Set an initial condition with constant water height, for x<x0
     79    """
     80
     81    def __init__(self, x0=0.25, x1=0.5, h=1.0):
     82        self.x0 = x0
     83        self.x1 = x1
     84        self.h  = h
     85
     86    def __call__(self, x, y):
     87        return self.h*((x>self.x0)&(x<self.x1))
     88
     89
     90
     91
    7692if myid == 0:
    7793
    7894    # read in the test files
    7995
    80     #filename = 'test-100.tsh'
    81     filename = 'merimbula_10785_1.tsh'
     96    filename = 'test-100.tsh'
     97    #filename = 'merimbula_10785_1.tsh'
    8298    nx = numprocs
    8399    ny = 1
     
    85101        print "WARNING: number of subboxes is not equal to the number of proc"
    86102
    87     [nodes, triangles, boundary, triangles_per_proc, rect] =\
    88             pmesh_divide_steve(filename, Shallow_Water_Domain, nx, ny)
    89 
    90     # subdivide the mesh
    91 
    92     #print rect
    93 
    94     rect = array(rect, Float)
     103    domain_full = pmesh_to_domain_instance(filename, Domain)
     104
     105    domain_full.set_quantity('stage', Set_Stage(200.0,300.0,1.0))
     106
     107
     108    nodes, triangles, boundary, triangles_per_proc, stage_re = \
     109         pmesh_divide(domain_full, nx, ny)
     110
     111    rect = array(domain_full.xy_extent, Float)
     112
     113    print rect
    95114
    96115    submesh = build_submesh(nodes, triangles, boundary, triangles_per_proc)
     116
     117
     118    lower = 0
     119    submesh["full_stage"]  = []
     120    submesh["ghost_stage"] = []
     121
     122    print submesh["full_stage"]
     123
     124    for p in range(numprocs):
     125        upper =   lower+triangles_per_proc[p]
     126        print lower, upper
     127
     128        submesh["full_stage"].append(stage_re[lower:upper])
     129
     130        M = len(submesh["ghost_triangles"][p])
     131        submesh["ghost_stage"].append(zeros( (M,3) , Float))
     132        for j in range(M):
     133            submesh["ghost_stage"][p][j] = stage_re[submesh["ghost_triangles"][p][j][0]]
     134
     135        lower = upper
     136        print p
     137        print submesh["full_stage"][p]
     138        print submesh["ghost_stage"][p]
     139
    97140
    98141    # send the mesh partition to the appropriate processor
     
    102145
    103146    hostmesh = extract_hostmesh(submesh)
    104     [points, vertices, boundary, ghost_recv_dict, full_send_dict] = \
     147    points, vertices, boundary, ghost_recv_dict, full_send_dict = \
    105148             build_local_mesh(hostmesh, 0, triangles_per_proc[0], numprocs)
    106149
     
    110153
    111154else:
    112     [points, vertices, boundary, ghost_recv_dict, full_send_dict] = \
    113              rec_submesh(0)
     155    points, vertices, boundary, ghost_recv_dict, full_send_dict = rec_submesh(0)
    114156
    115157#if myid == 0:
     
    125167#print rect
    126168
    127 domain = Parallel_Shallow_Water_Domain(points, vertices, boundary,
    128                                    full_send_dict  = full_send_dict,
    129                                    ghost_recv_dict = ghost_recv_dict)
     169domain = Parallel_Domain(points, vertices, boundary,
     170                         full_send_dict  = full_send_dict,
     171                         ghost_recv_dict = ghost_recv_dict)
    130172
    131173domain.initialise_visualiser(rect=rect)
    132 domain.default_order = 2
     174domain.default_order = 1
    133175
    134176#Boundaries
     
    139181domain.set_boundary( {'outflow': R, 'inflow': R, 'inner':R, 'exterior': R, 'open':R} )
    140182
    141 class Set_Stage:
    142     """Set an initial condition with constant water height, for x<x0
    143     """
    144 
    145     def __init__(self, x0=0.25, x1=0.5, h=1.0):
    146         self.x0 = x0
    147         self.x1 = x1
    148         self.h  = h
    149 
    150     def __call__(self, x, y):
    151         return self.h*((x>self.x0)&(x<self.x1))
    152 
    153 #domain.set_quantity('stage', Set_Stage(250.0,300.0,1.0))
    154 domain.set_quantity('stage', Set_Stage(756000.0,756500.0,4.0))
     183
     184domain.set_quantity('stage', Set_Stage(250.0,300.0,1.0))
     185#domain.set_quantity('stage', Set_Stage(756000.0,756500.0,4.0))
    155186
    156187#---------
     
    158189t0 = time.time()
    159190domain.visualise = True
    160 #yieldstep = 0.1
    161 #finaltime = 1000
    162 
    163 yieldstep = 10
    164 finaltime = 2000
     191yieldstep = 0.1
     192finaltime = 0.1
     193
     194#yieldstep = 10
     195#finaltime = 10
    165196
    166197for t in domain.evolve(yieldstep = yieldstep, finaltime = finaltime):
    167198    if myid == 0:
    168199        domain.write_time()
     200        print domain.quantities['stage'].centroid_values[0]
    169201
    170202if myid == 0:
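
The stage-copying loop added to run_parallel_sw_merimbula.py is the core of this changeset. A condensed restatement with comments (debug prints dropped; the ghost triangle record layout is read off the diff, where entry 0 is the triangle's global number):

    from Numeric import zeros, Float

    lower = 0
    submesh["full_stage"]  = []
    submesh["ghost_stage"] = []
    for p in range(numprocs):
        upper = lower + triangles_per_proc[p]

        # Full triangles: contiguous slice of the relabelled stage values.
        submesh["full_stage"].append(stage_re[lower:upper])

        # Ghost triangles: look up each one's global number and copy its
        # three stage vertex values.
        M = len(submesh["ghost_triangles"][p])
        submesh["ghost_stage"].append(zeros((M, 3), Float))
        for j in range(M):
            global_no = submesh["ghost_triangles"][p][j][0]
            submesh["ghost_stage"][p][j] = stage_re[global_no]

        lower = upper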
  • inundation/ga/storm_surge/pyvolution/advection.py

    r1564 r1575  
    7070
    7171    def initialise_visualiser(self,scale_z=1.0,rect=None):
    72         #Realtime visualisation       
     72        #Realtime visualisation
    7373        if self.visualiser is None:
    7474            from realtime_visualisation_new import Visualiser
     
    110110    def compute_fluxes(self):
    111111
    112         self.compute_fluxes_weave()
     112        try:
     113            import weave
     114            self.weave_available = True
     115        except:
     116            self.weave_available = False
     117
     118        if self.weave_available:
     119            self.compute_fluxes_weave()
     120        else:
     121            self.compute_fluxes_python()
     122
     123
    113124
    114125    def compute_fluxes_python(self):
  • inundation/ga/storm_surge/pyvolution/mesh.py

    r1392 r1575  
    586586        #    assert self.neighbours[id,edge] < 0
    587587        #
    588         #NOTE (Ole): I reckon this was resolved late 2004?
    589         #
    590         #See domain.set_boundary
     588        #NOTE (Ole): I reckon this was resolved late 2004?
     589        #
     590        #See domain.set_boundary
    591591
    592592
  • inundation/ga/storm_surge/wiki/issues.txt

    r1137 r1575  
    88Action: See under pmesh/documentation
    99
    10 Issue: geo_reference is not passed into domain and further on to sww file. Hence we can't export results properly to e.g GIS 
     10Issue: geo_reference is not passed into domain and further on to sww file. Hence we can't export results properly to e.g GIS
    1111Importance: High
    1212Action: Make it happen!
    1313
     14Issue: With reflective boundaries, and very small timesteps and very shallow
     15stage, there seems to be a situation where we lose mass. Must investigate!
     16Importance: Mid-High
     17Action: Check out boundary condition
    1418
    15  
     19
     20
    1621CLOSED ISSUES:
    1722------------
    18 Issue: Segments in pmesh that are solely used to separate regions 
    19 (e.g. for different resolution) will appear as an *internal* 
    20 boundary in the resulting domain even though it may not have been intended to 
     23Issue: Segments in pmesh that are solely used to separate regions
     24(e.g. for different resolution) will appear as an *internal*
     25boundary in the resulting domain even though it may not have been intended to
    2126have a boundary condition applied.
    22 Background: Genuine internal boundaries are created the same way as 
    23 external boundaries through the 'boundary' dictionary mapping triangle_id 
     27Background: Genuine internal boundaries are created the same way as
     28external boundaries through the 'boundary' dictionary mapping triangle_id
    2429and edge number to a tag.
    25 When a mesh is created, this dictionary will override the neighbour structure 
     30When a mesh is created, this dictionary will override the neighbour structure
    2631and thereby enforce the internal boundary status.
    2732This is all very well for 'True' internal boundaries.
    28 However, once a segment belongs to a boundary (internal or external) 
    29 it must be bound to a boundary condition object in order to supply 
    30 values for the flux computations. Hence there is no way of allowing 
    31 normal flow between neighbouring triangles in the case where they are 
     33However, once a segment belongs to a boundary (internal or external)
     34it must be bound to a boundary condition object in order to supply
     35values for the flux computations. Hence there is no way of allowing
     36normal flow between neighbouring triangles in the case where they are
    3237separated by an (unintended) internal boundary.
    33 An older version of pyvolution allowed None as a boundary object which 
    34 probably meant that a zero dirichlet condition was imposed. 
     38An older version of pyvolution allowed None as a boundary object which
     39probably meant that a zero dirichlet condition was imposed.
    3540Not sure about this, though.
    3641Importance: High
     
    4045boundary.  It defaults to no tag.
    4146Status: resolved
    42 
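
The closed issue above leans on the 'boundary' dictionary convention. As a small illustrative reminder (hypothetical values, not taken from any particular mesh): the dictionary maps (triangle_id, edge_number) to a tag, and every segment that appears in it becomes a boundary that must later be bound to a boundary condition object via domain.set_boundary.

    # Hypothetical example of the boundary dictionary discussed in the issue.
    boundary = {
        (0, 1): 'exterior',  # edge 1 of triangle 0 lies on the outer boundary
        (7, 2): 'inflow',    # a tagged internal/inflow segment
    }
    # Later, each tag has to be given a boundary condition object, e.g.:
    # domain.set_boundary({'exterior': R, 'inflow': R})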