source: anuga_validation/automated_validation_tests/patong_beach_validation/validate.py @ 6918

Last change on this file since 6918 was 6918, checked in by rwilson, 16 years ago

Changes to cmpsww.py error reporting.

  • Property svn:executable set to *
File size: 14.5 KB
'''
Automatic verification that the ANUGA code runs the Patong simulation
and produces the expected output.

Required files are downloaded from the ANUGA servers if they are
out of date or missing.
'''

import sys
import os
import glob
import unittest
import time
import shutil

from anuga.utilities.system_tools import get_web_file, untar_file, file_length
import anuga.utilities.log as log


# sourceforge download mirror hosts (must end with '/')
# try these in turn for each file
## NOTE: It would be more reliable if we could somehow 'poll' Sourceforge
##       for a list of mirrors instead of hard-coding a list here.  That may
##       be too difficult, as we only get the chance to select a mirror when
##       actually downloading a file.
MIRRORS = ['http://transact.dl.sourceforge.net/sourceforge/anuga/',       # au
           'http://voxel.dl.sourceforge.net/sourceforge/anuga/',          # us
           'http://superb-west.dl.sourceforge.net/sourceforge/anuga/',    # us
           'http://jaist.dl.sourceforge.net/sourceforge/anuga/',          # jp
           'http://dfn.dl.sourceforge.net/sourceforge/anuga/'             # de
          ]
##MIRRORS = ['http://10.7.64.243/patong_validation_data/']       # local linux box

# URL from which to manually download data files, if required
DATA_FILES_URL = 'http://sourceforge.net/project/showfiles.php?group_id=172848'

# sequence of mandatory local data objects
Mandatory_Data_Objects = ('data.tgz',)

# sequence of optional local data objects.
# these names must be of the form <scene>.sww.<type>.tgz
# as code below depends upon it.
Optional_Data_Objects = ('patong.sww.TRIAL.tgz',
                         'patong.sww.BASIC.tgz',
                         'patong.sww.FINAL.tgz'
                        )

# path to the local data directory
Local_Data_Directory = 'local_data'

# path to the remote data directory
Remote_Data_Directory = 'remote_data'

# name of stdout catch file for runmodel.py
RUNMODEL_STDOUT = 'runmodel.stdout'

# text at start of 'output dir' line in RUNMODEL_STDOUT file
OUTDIR_PREFIX = 'Make directory '

# name of SWW file produced by run_model.py
OUTPUT_SWW = 'patong.sww'


def setup():
    '''Prepare for the validation run.

    Check we have required data set in project.py.
    '''

    pass


def refresh_local_data(data_objects, target_dir, mirrors):
    '''Update local data objects from the server.

    data_objects:   list of files to refresh
    target_dir:     directory in which to put files
    mirrors:        list of mirror sites to use

    Each file has an associated *.digest file used to decide
    if the local file needs refreshing.

    Return True if all went well, else False.
    '''

    # decision function to decide if a file contains HTML
    def is_html(filename):
        '''Decide if given file contains HTML.'''
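        # a mirror that can't serve the requested file typically returns an
        # HTML error or landing page instead; treating any response whose
        # first 1KB contains 'DOCTYPE' as HTML catches that case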

        fd = open(filename)
        data = fd.read(1024)
        fd.close()

        if 'DOCTYPE' in data:
            return True

        return False

    # local function to get remote file from one of mirrors
    def get_remote_from_mirrors(remote, local, auth, mirrors):
        '''Get 'remote' from one of 'mirrors', put in 'local'.'''

        # Get a unique date+time string to defeat caching.  The idea is to add
        # this to the end of any URL so the proxy sees a different request.
        cache_defeat = '?' + time.strftime('%Y%m%d%H%M%S')
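        # e.g. a fetch of 'data.tgz' becomes a request like
        # 'http://<mirror>/sourceforge/anuga/data.tgz?20090701123456'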

        # try each mirror when getting file
        for mirror in mirrors:
            log.debug('Fetching remote file %s from mirror %s'
                      % (remote, mirror))

            remote_url = mirror + remote + cache_defeat
            (result, auth) = get_web_file(remote_url, local, auth=auth)
            if result and not is_html(local):
                log.debug('Success fetching file %s' % remote)
                return (True, auth)
            log.debug('Failure fetching from %s' % mirror)

        log.debug('Failure fetching file %s' % remote)
        return (False, auth)


    # local function to compare contents of two files
    def files_same(file_a, file_b):
        '''Compare two files to see if contents are the same.'''
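        # only ever called on the small *.digest files here, so reading
        # whole files into memory is cheap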

        fd = open(file_a, 'r')
        data_a = fd.read()
        fd.close()

        fd = open(file_b, 'r')
        data_b = fd.read()
        fd.close()

        return data_a == data_b


    # local function to update one data object
    def refresh_object(obj, auth, mirrors):
        '''Update object 'obj' using authentication tuple 'auth'.

        Return (True, <updated_auth>) if all went well,
        else (False, <updated_auth>).
        '''

        # create local and remote file paths.
        obj_digest = obj + '.digest'
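        # e.g. obj 'data.tgz' has digest object 'data.tgz.digest'; the
        # digest (presumably a checksum of the object) decides whether
        # the local copy is stale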

        remote_file = os.path.join(Remote_Data_Directory, obj)
        remote_digest = remote_file + '.digest'

        local_file = os.path.join(Local_Data_Directory, obj)
        local_digest = local_file + '.digest'

        # see if missing either digest or object .tgz
        if not os.path.exists(local_digest) or not os.path.exists(local_file):
            # no digest or no object, download both digest and object
            (res, auth) = get_remote_from_mirrors(obj_digest, local_digest, auth, mirrors)
            if res:
                (res, auth) = get_remote_from_mirrors(obj, local_file, auth, mirrors)
        else:
            # download object digest to remote data directory
            (res, auth) = get_remote_from_mirrors(obj_digest, remote_digest, auth, mirrors)
            if res:
                if not files_same(local_digest, remote_digest):
                    # digests differ, refresh object
                    shutil.move(remote_digest, local_digest)
                    (res, auth) = get_remote_from_mirrors(obj, local_file, auth, mirrors)

        return (res, auth)

    # create local data directory if required
    log.debug('Creating local directory: %s' % Local_Data_Directory)
    if not os.path.exists(Local_Data_Directory):
        os.mkdir(Local_Data_Directory)

    # clean out remote data copy directory
    log.debug('Cleaning remote directory: %s' % Remote_Data_Directory)
    shutil.rmtree(Remote_Data_Directory, ignore_errors=True)
    os.mkdir(Remote_Data_Directory)

    # refresh the local files
    auth = None
    result = True
    for data_object in data_objects:
        log.info("Refreshing file '%s'" % data_object)
        log.debug('refresh_local_data: getting %s from mirrors, auth=%s'
                  % (data_object, str(auth)))
        (res, auth) = refresh_object(data_object, auth, mirrors)
        log.debug('refresh_local_data: returned (res,auth)=%s,%s'
                  % (str(res), str(auth)))
        if not res:
            log.info('Refresh of file %s failed.' % data_object)
            result = False

    if result:
        log.info('Local data has been refreshed.')
    else:
        log.info('Local data has been refreshed, with one or more errors.')
    return result


def can_we_run():
    '''Decide if we can run with the files we have.

    Return True if we *can* run, else False.

    Tell user what is happening first, then untar files.
    '''

    log.critical('Checking if you have the required files to run:')

    # get max width of object name string
    max_width = 0
    for obj in Mandatory_Data_Objects:
        max_width = max(len(obj), max_width)
    for obj in Optional_Data_Objects:
        max_width = max(len(obj), max_width)

    # if we don't have *all* mandatory objects, we can't run
    have_mandatory_files = True
    for obj in Mandatory_Data_Objects:
        obj_path = os.path.join(Local_Data_Directory, obj)
        if os.path.exists(obj_path):
            log.info('\t%s  found' % obj.ljust(max_width))
        else:
            log.info('\t%s  MISSING AND REQUIRED!' % obj.ljust(max_width))
            have_mandatory_files = False

    # at least *one* of these must exist
    have_optional_files = False
    for obj in Optional_Data_Objects:
        obj_path = os.path.join(Local_Data_Directory, obj)
        if os.path.exists(obj_path):
            have_optional_files = True
            log.info('\t%s  found' % obj.ljust(max_width))
        else:
            log.info('\t%s  MISSING!' % obj.ljust(max_width))

    if not have_mandatory_files or not have_optional_files:
        log.critical('You must obtain the missing files before you can run '
                     'this validation.')
        return False

    log.critical('You have the required files.')

    return True


def set_environment():
    # modify environment so we use the local data
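    # (INUNDATIONHOME and MUXHOME are presumably read by project.py and the
    # run scripts to locate input data; point them at the local data copy)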
    new_inundationhome = os.path.join(Local_Data_Directory, '')
    os.environ['INUNDATIONHOME'] = new_inundationhome
    new_muxhome = os.path.join(Local_Data_Directory, 'data')
    os.environ['MUXHOME'] = new_muxhome


def run_simulation(vtype, sim_obj):
    '''Run a simulation.

    Returns True if all went well, else False.
    '''

    # untar the object
    tar_path = os.path.join(Local_Data_Directory, sim_obj)
    log.info('Untarring %s in directory %s ...'
             % (tar_path, Local_Data_Directory))
    untar_file(tar_path, target_dir=Local_Data_Directory)

    # modify project.py template
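    # the template's '#!SETUP!#' marker is replaced with the run type
    # ('trial', 'basic' or 'final') to configure the simulation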
    log.debug("Creating '%s' version of project.py" % vtype)
    fd = open('project.py.template', 'r')
    project = fd.readlines()
    fd.close()

    new_project = []
    for line in project:
        new_project.append(line.replace('#!SETUP!#', vtype.lower()))

    fd = open('project.py', 'w')
    fd.write(''.join(new_project))
    fd.close()

    # import new project.py
    import project

    # run the simulation, produce SWW file
    log.debug('Running run_model.py')
    cmd = 'python run_model.py > %s' % RUNMODEL_STDOUT
    log.debug("run_simulation: doing '%s'" % cmd)
    res = os.system(cmd)
    log.debug("run_simulation: res=%d" % res)

    # 'unimport' project.py
    del project

    # check result
    if res != 0:
        log.critical('Simulation failed, check log')

    return res == 0


def check_that_output_is_as_expected(expected_sww, valid_sww):
    '''Check that validation output is as required.'''

    # get path to expected SWW file
    log.critical('Checking that simulation results are as expected ...')
    local_sww = os.path.join(Local_Data_Directory, valid_sww)

    # get output directory from stdout capture file
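    # run_model.py logs a line of the form 'Make directory <output_dir>'
    # (see OUTDIR_PREFIX); recover <output_dir> from the captured stdout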
    try:
        fd = open(RUNMODEL_STDOUT, 'r')
    except IOError, e:
        log.critical("Can't open catch file '%s': %s"
                     % (RUNMODEL_STDOUT, str(e)))
        return 1
    lines = fd.readlines()
    fd.close()

    output_directory = None
    for line in lines:
        if line.startswith(OUTDIR_PREFIX):
            output_directory = line.replace(OUTDIR_PREFIX, '', 1)
            output_directory = output_directory.strip('\n')
            break
    if output_directory is None:
        log.critical("Couldn't find line starting with '%s' in file '%s'"
                     % (OUTDIR_PREFIX, RUNMODEL_STDOUT))
        return 1

    log.debug('check_that_output_is_as_expected: output_directory=%s'
              % output_directory)

    # compare expected and newly generated SWW files
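    # cmpsww.py exits with status 0 if the two SWW files compare equal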
    new_output_sww = os.path.join(output_directory, expected_sww)
    cmd = 'python cmpsww.py %s %s > cmpsww.stdout' % (local_sww, new_output_sww)
    log.debug("check_that_output_is_as_expected: doing '%s'" % cmd)
    res = os.system(cmd)
    log.debug("check_that_output_is_as_expected: res=%d" % res)
    log.critical('')
    if res == 0:
        log.info('Simulation results are as expected.')
    else:
        log.critical('Simulation results are NOT as expected.')
        fd = open('cmpsww.stdout', 'r')
        cmp_error = fd.readlines()
        fd.close()
        log.critical(''.join(cmp_error))


def teardown():
    '''Clean up after validation run.'''

    log.debug('teardown: called')

    # remove remote directory and stdout capture file
    shutil.rmtree(Remote_Data_Directory, ignore_errors=True)
    try:
        os.remove(RUNMODEL_STDOUT)
    except OSError:
        pass


################################################################################
# Mainline - run the simulation, check output.
################################################################################

# set logging levels
log.console_logging_level = log.INFO
log.log_logging_level = log.DEBUG
log_filename = log.log_filename

setup()

# prepare user for what is about to happen

msg = '''
This validation requires a working internet connection to refresh its files.
You may still run this validation without an internet connection if you have
the required files.

If you are behind a proxy server you will need to supply your proxy details,
such as the proxy server address and your proxy username and password.  These
can be defined in one or more of the environment variables:
    HTTP_PROXY
    PROXY_USERNAME
    PROXY_PASSWORD
If they are not supplied in environment variables you will be prompted for
the information.
'''

log.critical(msg)

# make sure local data is up to date
all_objects = Mandatory_Data_Objects + Optional_Data_Objects
if not refresh_local_data(all_objects, Local_Data_Directory, MIRRORS):
    if not can_we_run():
        log.critical("Can't refresh via the internet and you don't have the "
                     "required files.")
        log.critical('Terminating the validation.')
        log.critical('')
        log.critical('If you get the missing files from %s' % DATA_FILES_URL)
        log.critical('then you can try to run the validation again.  Put the '
                     'files into the directory')
        log.critical("%s." % Local_Data_Directory)
        sys.exit(10)

# now untar mandatory objects
for obj in Mandatory_Data_Objects:
    tar_path = os.path.join(Local_Data_Directory, obj)
    log.info('Untarring %s in directory %s ...'
             % (tar_path, Local_Data_Directory))
    untar_file(tar_path, target_dir=Local_Data_Directory)

# set required environment variables
set_environment()

# now run what simulations we can and check output is as expected
for odo in Optional_Data_Objects:
    start_time = time.time()
    (_, vtype, _) = odo.rsplit('.', 2)
    vtype = vtype.lower()
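    # e.g. odo 'patong.sww.TRIAL.tgz' -> vtype 'TRIAL' -> 'trial'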
    log.critical('#' * 72)
    log.critical("Running Patong '%s' validation ..." % vtype)
    if run_simulation(vtype, odo):
        # get SWW names expected and valid, check 'equal'
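        # e.g. odo 'patong.sww.TRIAL.tgz' -> valid_sww 'patong.sww.TRIAL'
        #      and expected_sww 'patong.sww'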
        (valid_sww, _) = odo.rsplit('.', 1)
        (expected_sww, _) = valid_sww.rsplit('.', 1)
        check_that_output_is_as_expected(expected_sww, valid_sww)
    shutil.move(log_filename, '%s.%s' % (log_filename, vtype))
    stop_time = time.time()
    log.critical("'%s' validation took %.1fs" % (vtype, stop_time - start_time))

# clean up
teardown()