source: anuga_validation/automated_validation_tests/patong_beach_validation/validate.py @ 7196

Last change on this file was r7196, checked in by rwilson, 16 years ago:

Forward-merged numpy changes to Numeric trunk.

  • Property svn:executable set to *
File size: 15.2 KB
'''
Automatic verification that the ANUGA code runs the Patong simulation
and produces the expected output.

Required files are downloaded from the ANUGA servers if they are
out of date or missing.
'''

import sys
import os
import glob
import unittest
import time
import shutil

from anuga.utilities.system_tools import get_web_file, untar_file, file_length
import anuga.utilities.log as log


# sourceforge download mirror hosts (must end with '/')
# try these in turn for each file
## NOTE: It would be more reliable if we could somehow 'poll' Sourceforge
##       for a list of mirrors instead of hard-coding a list here.  The only
##       way to do this at the moment is to 'screen-scrape' the data at
##       http://apps.sourceforge.net/trac/sourceforge/wiki/Mirrors
##       but that only gets the main web page for the entity, not the
##       Sourceforge download mirror server.
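## A possible sketch of such polling is kept here, commented out, for
## reference only.  It fetches the wiki page and pulls out anything that
## looks like a download host; the page URL above and the hostname pattern
## below are assumptions and may change without notice.
##def poll_sourceforge_mirrors():
##    '''Return a guessed list of mirror URLs scraped from the wiki page.'''
##    import re
##    import urllib
##    page = urllib.urlopen('http://apps.sourceforge.net/trac/sourceforge/'
##                          'wiki/Mirrors').read()
##    hosts = re.findall(r'([a-z][a-z-]*)\.dl\.sourceforge\.net', page)
##    return ['http://%s.dl.sourceforge.net/sourceforge/anuga/' % host
##            for host in sorted(set(hosts))]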
MIRRORS = ['http://transact.dl.sourceforge.net/sourceforge/anuga/',       # au
           'http://voxel.dl.sourceforge.net/sourceforge/anuga/',          # us
           'http://superb-west.dl.sourceforge.net/sourceforge/anuga/',    # us
           'http://jaist.dl.sourceforge.net/sourceforge/anuga/',          # jp
           'http://dfn.dl.sourceforge.net/sourceforge/anuga/'             # de
          ]
### for testing
##MIRRORS = ['http://10.7.64.243/patong_validation_data/']       # local linux box

# URL to hand-get data files, if required
DATA_FILES_URL = ('http://sourceforge.net/project/showfiles.php?'
                  'group_id=172848&package_id=319323&release_id=677531')

# sequence of mandatory local data objects
Mandatory_Data_Objects = ('data.tgz',)

# sequence of optional local data objects.
# these names must be of the form <scene>.sww.<type>.tgz
# as code below depends upon it.
Optional_Data_Objects = ('patong.sww.TRIAL.tgz',
                         'patong.sww.BASIC.tgz',
                         'patong.sww.FINAL.tgz'
                        )
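# For example, 'patong.sww.TRIAL.tgz'.rsplit('.', 2) yields
# ['patong.sww', 'TRIAL', 'tgz'], which is how the mainline below recovers
# the <type> part of each name.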

# path to the local data directory
Local_Data_Directory = 'local_data'

# path to the remote data directory
Remote_Data_Directory = 'remote_data'

# name of stdout catch file for runmodel.py
RUNMODEL_STDOUT = 'runmodel.stdout'

# text at start of 'output dir' line in RUNMODEL_STDOUT file
OUTDIR_PREFIX = 'Make directory '
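# e.g. a captured line 'Make directory /tmp/patong_output' marks
# '/tmp/patong_output' as the simulation output directory (a hypothetical
# path, shown for illustration only)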

# Name of SWW file produced by run_model.py
OUTPUT_SWW = 'patong.sww'


def setup():
    '''Prepare for the validation run.

    Check we have required data set in project.py.
    '''

    # currently a placeholder; nothing is required here yet
    pass

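# The *.digest files fetched by refresh_local_data() below are small text
# files whose contents are compared verbatim; this script never computes a
# digest itself.  For reference, a local digest could be computed as in this
# commented-out sketch (that the server-side digests are MD5 hex strings is
# an assumption):
##def compute_digest(filename, blocksize=1024*1024):
##    '''Return the hex digest of 'filename', read in blocks.'''
##    import hashlib
##    digest = hashlib.md5()
##    fd = open(filename, 'rb')
##    data = fd.read(blocksize)
##    while data:
##        digest.update(data)
##        data = fd.read(blocksize)
##    fd.close()
##    return digest.hexdigest()
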
def refresh_local_data(data_objects, target_dir, mirrors):
    '''Update local data objects from the server.

    data_objects:   list of files to refresh
    target_dir:     directory in which to put files
    mirrors:        list of mirror sites to use

    Each file has an associated *.digest file used to decide
    if the local file needs refreshing.

    Return True if all went well, else False.
    '''

    # decision function to decide if a file contains HTML
    def is_html(filename):
        '''Decide if given file contains HTML.'''

        fd = open(filename)
        data = fd.read(1024)
        fd.close()

        # a simple check: look for 'DOCTYPE' in the first 1024 bytes
        return 'DOCTYPE' in data


    # local function to get remote file from one of mirrors
    def get_remote_from_mirrors(remote, local, auth, mirrors):
        '''Get 'remote' from one of 'mirrors', put in 'local'.'''

        # Get a unique date+time string to defeat caching.  The idea is to add
        # this to the end of any URL so the proxy sees a different request.
        cache_defeat = '?' + time.strftime('%Y%m%d%H%M%S')
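        # e.g. remote 'data.tgz' is requested as 'data.tgz?20090624094500'
        # (hypothetical timestamp, shown for illustration only)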

        # try each mirror when getting file
        for mirror in mirrors:
            log.debug('Fetching remote file %s from mirror %s'
                      % (remote, mirror))

            remote_url = mirror + remote + cache_defeat
            (result, auth) = get_web_file(remote_url, local, auth=auth)
            if result and not is_html(local):
                log.debug('Success fetching file %s' % remote)
                return (True, auth)
            log.debug('Failure fetching from %s' % mirror)
            auth = None

        log.debug('Failure fetching file %s' % remote)
        return (False, auth)


    # local function to compare contents of two files
    def files_same(file_a, file_b):
        '''Compare two files to see if contents are the same.'''

        fd = open(file_a, 'r')
        data_a = fd.read()
        fd.close()

        fd = open(file_b, 'r')
        data_b = fd.read()
        fd.close()

        return data_a == data_b


    # local function to update one data object
    def refresh_object(obj, auth, mirrors):
        '''Update object 'obj' using authentication tuple 'auth'.

        Return (True, <updated_auth>) if all went well,
        else (False, <updated_auth>).
        '''

        # create local and remote file paths.
        obj_digest = obj + '.digest'

        remote_file = os.path.join(Remote_Data_Directory, obj)
        remote_digest = remote_file + '.digest'

        local_file = os.path.join(Local_Data_Directory, obj)
        local_digest = local_file + '.digest'

        # see if missing either digest or object .tgz
        if not os.path.exists(local_digest) or not os.path.exists(local_file):
            # no digest or no object, download both digest and object
            (res, auth) = get_remote_from_mirrors(obj_digest, local_digest, auth, mirrors)
            if res:
                (res, auth) = get_remote_from_mirrors(obj, local_file, auth, mirrors)
        else:
            # download object digest to remote data directory
            (res, auth) = get_remote_from_mirrors(obj_digest, remote_digest, auth, mirrors)
            if res:
                if not files_same(local_digest, remote_digest):
                    # digests differ, refresh object
                    shutil.move(remote_digest, local_digest)
                    (res, auth) = get_remote_from_mirrors(obj, local_file, auth, mirrors)

        return (res, auth)

    # create local data directory if required
    log.debug('Creating local directory: %s' % Local_Data_Directory)
    if not os.path.exists(Local_Data_Directory):
        os.mkdir(Local_Data_Directory)

    # clean out remote data copy directory
    log.debug('Cleaning remote directory: %s' % Remote_Data_Directory)
    shutil.rmtree(Remote_Data_Directory, ignore_errors=True)
    os.mkdir(Remote_Data_Directory)

    # success, refresh local files
    auth = None
    result = True
    for data_object in data_objects:
        log.info("Refreshing file '%s'" % data_object)
        log.debug('refresh_local_data: getting %s from mirrors, auth=%s'
                  % (data_object, str(auth)))
        (res, auth) = refresh_object(data_object, auth, mirrors)
        log.debug('refresh_local_data: returned (res,auth)=%s,%s'
                  % (str(res), str(auth)))
        if not res:
            log.info('Refresh of file %s failed.' % data_object)
            result = False
            # don't use possibly bad 'auth' again,
            # some proxies lock out on repeated failures.
            auth = None

    if result:
        log.critical('Local data has been refreshed.')
    else:
        log.critical('Local data has been refreshed, with one or more errors.')
    log.critical()
    return result


def can_we_run():
    '''Decide if we can run with the files we have.

    Return True if we *can* run, else False.

    Tell the user what was found before deciding.
    '''

    log.critical('Checking if you have the required files to run:')

    # get max width of object name string
    max_width = 0
    for obj in Mandatory_Data_Objects:
        max_width = max(len(obj), max_width)
    for obj in Optional_Data_Objects:
        max_width = max(len(obj), max_width)

    # if we don't have *all* mandatory objects, we can't run
    have_mandatory_files = True
    for obj in Mandatory_Data_Objects:
        obj_path = os.path.join(Local_Data_Directory, obj)
        if os.path.exists(obj_path):
            log.info('\t%s  found' % obj.ljust(max_width))
        else:
            log.info('\t%s  MISSING AND REQUIRED!' % obj.ljust(max_width))
            have_mandatory_files = False

    # at least *one* of the optional objects must exist
    have_optional_files = False
    for obj in Optional_Data_Objects:
        obj_path = os.path.join(Local_Data_Directory, obj)
        if os.path.exists(obj_path):
            have_optional_files = True
            log.info('\t%s  found' % obj.ljust(max_width))
        else:
            log.info('\t%s  MISSING!' % obj.ljust(max_width))

    if not have_mandatory_files or not have_optional_files:
        log.critical('You must obtain the missing files before you can run '
                     'this validation.')
        return False

    log.critical('You have enough required files to run.')
    log.critical()

    return True


def set_environment():
    '''Modify the environment so we use the local data.'''

    # note: os.path.join(dir, '') yields the directory name with a trailing
    # path separator (presumably what code reading INUNDATIONHOME expects)
    new_inundationhome = os.path.join(Local_Data_Directory, '')
    os.environ['INUNDATIONHOME'] = new_inundationhome
    new_muxhome = os.path.join(Local_Data_Directory, 'data')
    os.environ['MUXHOME'] = new_muxhome


def run_simulation(vtype, sim_obj):
    '''Run a simulation.

    Returns True if all went well, else False.
    '''

    # untar the object
    tar_path = os.path.join(Local_Data_Directory, sim_obj)
    log.info('Untarring %s in directory %s ...'
             % (tar_path, Local_Data_Directory))
    untar_file(tar_path, target_dir=Local_Data_Directory)

    # modify project.py template
    log.debug("Creating '%s' version of project.py" % vtype)
    fd = open('project.py.template', 'r')
    project = fd.readlines()
    fd.close()

    new_project = []
    for line in project:
        new_project.append(line.replace('#!SETUP!#', vtype.lower()))

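    # e.g. any '#!SETUP!#' token in the template is now 'trial', 'basic'
    # or 'final', selecting which version of the simulation to run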
    fd = open('project.py', 'w')
    fd.write(''.join(new_project))
    fd.close()

    # import new project.py
    import project

    # run the simulation, produce SWW file
    log.info('Running the simulation ...')
    cmd = 'python run_model.py > %s' % RUNMODEL_STDOUT
    log.debug("run_simulation: doing '%s'" % cmd)
    res = os.system(cmd)
    log.debug("run_simulation: res=%d" % res)

    # 'unimport' project.py
    del project

    # check result
    if res != 0:
        log.critical('Simulation failed, check log')

    return res == 0

def check_that_output_is_as_expected(expected_sww, valid_sww):
    '''Check that validation output is as required.'''

    # get path to expected SWW file
    log.critical('Checking that simulation results are as expected ...')
    local_sww = os.path.join(Local_Data_Directory, valid_sww)

    # get output directory from stdout capture file
    try:
        fd = open(RUNMODEL_STDOUT, 'r')
    except IOError, e:
        log.critical("Can't open catch file '%s': %s"
                     % (RUNMODEL_STDOUT, str(e)))
        return 1
    lines = fd.readlines()
    fd.close()

    output_directory = None
    for line in lines:
        if line.startswith(OUTDIR_PREFIX):
            output_directory = line.replace(OUTDIR_PREFIX, '', 1)
            output_directory = output_directory.strip('\n')
            break
    if output_directory is None:
        log.critical("Couldn't find line starting with '%s' in file '%s'"
                     % (OUTDIR_PREFIX, RUNMODEL_STDOUT))
        return 1

    log.debug('check_that_output_is_as_expected: output_directory=%s'
              % output_directory)

    # compare SWW files here and there
    new_output_sww = os.path.join(output_directory, expected_sww)
    cmd = 'python cmpsww.py %s %s > cmpsww.stdout' % (local_sww, new_output_sww)
    log.debug("check_that_output_is_as_expected: doing '%s'" % cmd)
    res = os.system(cmd)
    log.debug("check_that_output_is_as_expected: res=%d" % res)
    log.critical()
    if res == 0:
        log.info('Simulation results are as expected.')
    else:
        log.critical('Simulation results are NOT as expected.')
        fd = open('cmpsww.stdout', 'r')
        cmp_error = fd.readlines()
        fd.close()
        log.critical(''.join(cmp_error))


def teardown():
    '''Clean up after validation run.'''

    log.debug('teardown: called')

    # remove remote directory and stdout capture file
    shutil.rmtree(Remote_Data_Directory, ignore_errors=True)
    try:
        os.remove(RUNMODEL_STDOUT)
    except OSError:
        pass


################################################################################
# Mainline - run the simulation, check output.
################################################################################

# set logging levels
log.console_logging_level = log.INFO
log.log_logging_level = log.DEBUG
log_filename = log.log_filename

setup()

# prepare user for what is about to happen

msg = '''
Please note that this validation test is accurate only on 64-bit Linux or
Windows.  Running the validation on a 32-bit operating system will result in
small differences in the generated mesh which defeat the simplistic test for
equality between the generated and expected SWW files.

This validation requires a working internet connection to refresh its files.
You may still run this validation without an internet connection if you have
the required files.

If you are behind a proxy server you will need to supply your proxy details
such as the proxy server address and your proxy username and password.  These
can be defined in one or more of the environment variables:
    HTTP_PROXY
    PROXY_USERNAME
    PROXY_PASSWORD
if you wish.  If not supplied in environment variables you will be prompted
for the information.
'''

log.critical(msg)

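# For example, under bash the proxy details could be supplied like this
# (hypothetical values, shown for illustration only):
#     export HTTP_PROXY=http://proxy.example.com:8080
#     export PROXY_USERNAME=fred
#     export PROXY_PASSWORD=secret
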
# make sure local data is up to date
all_objects = Mandatory_Data_Objects + Optional_Data_Objects
if not refresh_local_data(all_objects, Local_Data_Directory, MIRRORS):
    if not can_we_run():
        log.critical("Can't refresh via the internet and you don't have the "
                     "required files.")
        log.critical('Terminating the validation.')
        log.critical('')
        log.critical('If you get the missing files from %s' % DATA_FILES_URL)
        log.critical('then you can try to run the validation again.  Put the '
                     'files into the directory')
        log.critical("%s." % Local_Data_Directory)
        sys.exit(10)

# now untar mandatory objects
for obj in Mandatory_Data_Objects:
    tar_path = os.path.join(Local_Data_Directory, obj)
    log.info('Untarring %s in directory %s ...'
             % (tar_path, Local_Data_Directory))
    untar_file(tar_path, target_dir=Local_Data_Directory)

# set required environment variables
set_environment()

# now run what simulations we can and check output is as expected
for odo in Optional_Data_Objects:
    start_time = time.time()
    (_, vtype, _) = odo.rsplit('.', 2)
    vtype = vtype.lower()
    log.critical('#' * 72)
    log.critical("Running Patong '%s' validation ..." % vtype)
    if run_simulation(vtype, odo):
        # get SWW names expected and valid, check 'equal'
        (valid_sww, _) = odo.rsplit('.', 1)
        (expected_sww, _) = valid_sww.rsplit('.', 1)
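        # e.g. odo='patong.sww.TRIAL.tgz' gives valid_sww='patong.sww.TRIAL'
        # and expected_sww='patong.sww'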
        check_that_output_is_as_expected(expected_sww, valid_sww)
    shutil.move(log_filename, '%s.%s' % (log_filename, vtype))
    stop_time = time.time()
    log.critical("'%s' validation took %.1fs" % (vtype, stop_time - start_time))

# clean up
teardown()