source: anuga_validation/automated_validation_tests/patong_beach_validation/validate.py @ 7641

Last change on this file since 7641 was 7641, checked in by ole, 14 years ago

Repaired patong validate reading output dir. I still think this is hacky.

  • Property svn:executable set to *
File size: 15.4 KB
'''
Automatic verification that the ANUGA code runs the Patong simulation
and produces the expected output.

Required files are downloaded from the ANUGA servers if they are
out of date or missing.
'''

import sys
import os
import glob
import unittest
import time
import shutil

from anuga.utilities.system_tools \
     import get_web_file, untar_file, file_length, get_host_name
import anuga.utilities.log as log

log.log_filename = './validation.log'

# Sourceforge download mirror hosts (must end with '/').
# Try these in turn for each file.
## NOTE: It would be more reliable if we could somehow 'poll' Sourceforge
##       for a list of mirrors instead of hard-coding a list here.  The only
##       way to do this at the moment is to 'screen-scrape' the data at
##       http://apps.sourceforge.net/trac/sourceforge/wiki/Mirrors
##       but that only gets the main web page for the entity, not the
##       Sourceforge download mirror server.
MIRRORS = ['http://transact.dl.sourceforge.net/sourceforge/anuga/',       # au
           'http://voxel.dl.sourceforge.net/sourceforge/anuga/',          # us
           'http://superb-west.dl.sourceforge.net/sourceforge/anuga/',    # us
           'http://jaist.dl.sourceforge.net/sourceforge/anuga/',          # jp
           'http://dfn.dl.sourceforge.net/sourceforge/anuga/'             # de
          ]

### for testing
##MIRRORS = ['http://10.7.64.243/patong_validation_data/']       # local linux box

# URL to hand-get data files, if required
DATA_FILES_URL = ('http://sourceforge.net/project/showfiles.php?'
                  'group_id=172848&package_id=319323&release_id=677531')

# sequence of mandatory local data objects
Mandatory_Data_Objects = ('data.tgz',)

# Sequence of optional local data objects.
# These names must be of the form <scene>.sww.<type>.tgz
# as the code below depends upon it.
#Optional_Data_Objects = (
#                         'patong.sww.TRIAL.tgz',
#                         'patong.sww.BASIC.tgz',
#                         'patong.sww.FINAL.tgz'
#                        )
Optional_Data_Objects = ('patong.sww.TRIAL.tgz',)
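# For example, 'patong.sww.TRIAL.tgz'.rsplit('.', 2) gives the list
# ['patong.sww', 'TRIAL', 'tgz'], which is how the mainline below recovers
# the validation type ('TRIAL', lowercased to 'trial') from an object name.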

# path to the local data directory
Local_Data_Directory = 'local_data'

# path to the remote data directory
Remote_Data_Directory = 'remote_data'

# name of stdout catch file for runmodel.py
RUNMODEL_STDOUT = 'runmodel.stdout'

# text at start of the 'output dir' line in the RUNMODEL_STDOUT file
OUTDIR_PREFIX = 'Output directory: '
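# e.g. run_model.py is expected to print a line such as
# 'Output directory: /tmp/patong_output' (hypothetical path); that line is
# parsed in check_that_output_is_as_expected() below.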

# Name of SWW file produced by run_model.py
OUTPUT_SWW = 'patong.sww'


def setup():
    '''Prepare for the validation run.'''

    pass


def refresh_local_data(data_objects, target_dir, mirrors):
    '''Update local data objects from the server.

    data_objects:   list of files to refresh
    target_dir:     directory in which to put files
    mirrors:        list of mirror sites to use

    Each file has an associated *.digest file used to decide
    if the local file needs refreshing.

    Return True if all went well, else False.
    '''
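
    # NOTE: the exact format of a *.digest file is not defined here; it is
    # assumed to hold a checksum of its companion data file.  A hypothetical
    # sketch of how such a digest could be generated (not used by this
    # script) is:
    #
    #     import hashlib
    #     fd = open('data.tgz', 'rb')
    #     checksum = hashlib.md5(fd.read()).hexdigest()
    #     fd.close()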

    # decision function to decide if a file contains HTML
    def is_html(filename):
        '''Decide if the given file contains HTML.'''

        fd = open(filename)
        data = fd.read(1024)
        fd.close()

        return 'DOCTYPE' in data


    # local function to get a remote file from one of the mirrors
    def get_remote_from_mirrors(remote, local, auth, mirrors):
        '''Get 'remote' from one of 'mirrors', put it in 'local'.'''

        # Get a unique date+time string to defeat caching.  The idea is to
        # add this to the end of any URL so the proxy sees a different
        # request each time.
        cache_defeat = '?' + time.strftime('%Y%m%d%H%M%S')
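        # e.g. a request for 'data.tgz' might go out as
        # 'data.tgz?20100324093000' (hypothetical timestamp)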

        # try each mirror when getting the file
        for mirror in mirrors:
            log.debug('Fetching remote file %s from mirror %s'
                      % (remote, mirror))

            remote_url = mirror + remote + cache_defeat
            (result, auth) = get_web_file(remote_url, local, auth=auth)
            if result and not is_html(local):
                log.debug('Success fetching file %s' % remote)
                return (True, auth)
            log.debug('Failure fetching from %s' % mirror)
            auth = None

        log.debug('Failure fetching file %s' % remote)
        return (False, auth)


    # local function to compare the contents of two files
    def files_same(file_a, file_b):
        '''Compare two files to see if their contents are the same.'''

        fd = open(file_a, 'r')
        data_a = fd.read()
        fd.close()

        fd = open(file_b, 'r')
        data_b = fd.read()
        fd.close()

        return data_a == data_b


    # local function to update one data object
    def refresh_object(obj, auth, mirrors):
        '''Update object 'obj' using authentication tuple 'auth'.

        Return (True, <updated_auth>) if all went well,
        else (False, <updated_auth>).
        '''

        # create local and remote file paths
        obj_digest = obj + '.digest'

        remote_file = os.path.join(Remote_Data_Directory, obj)
        remote_digest = remote_file + '.digest'

        local_file = os.path.join(Local_Data_Directory, obj)
        local_digest = local_file + '.digest'

        # see if we are missing either the digest or the object .tgz
        if not os.path.exists(local_digest) or not os.path.exists(local_file):
            # no digest or no object, download both digest and object
            (res, auth) = get_remote_from_mirrors(obj_digest, local_digest,
                                                  auth, mirrors)
            if res:
                (res, auth) = get_remote_from_mirrors(obj, local_file,
                                                      auth, mirrors)
        else:
            # download object digest to the remote data directory
            (res, auth) = get_remote_from_mirrors(obj_digest, remote_digest,
                                                  auth, mirrors)
            if res:
                if not files_same(local_digest, remote_digest):
                    # digests differ, refresh the object
                    shutil.move(remote_digest, local_digest)
                    (res, auth) = get_remote_from_mirrors(obj, local_file,
                                                          auth, mirrors)

        return (res, auth)

    # create the local data directory if required
    log.debug('Creating local directory: %s' % Local_Data_Directory)
    if not os.path.exists(Local_Data_Directory):
        os.mkdir(Local_Data_Directory)

    # clean out the remote data copy directory
    log.debug('Cleaning remote directory: %s' % Remote_Data_Directory)
    shutil.rmtree(Remote_Data_Directory, ignore_errors=True)
    os.mkdir(Remote_Data_Directory)

    # refresh the local files
    auth = None
    result = True
    for data_object in data_objects:
        log.info("Refreshing file '%s'" % data_object)
        log.debug('refresh_local_data: getting %s from mirrors, auth=%s'
                  % (data_object, str(auth)))
        (res, auth) = refresh_object(data_object, auth, mirrors)
        log.debug('refresh_local_data: returned (res,auth)=%s,%s'
                  % (str(res), str(auth)))
        if not res:
            log.info('Refresh of file %s failed.' % data_object)
            result = False
            # don't use a possibly bad 'auth' again;
            # some proxies lock out on repeated failures
            auth = None

    if result:
        log.critical('Local data has been refreshed.')
    else:
        log.critical('Local data has been refreshed, with one or more errors.')
    log.critical()
    return result


def can_we_run():
    '''Decide if we can run with the files we have.

    Return True if we *can* run, else False.

    Tell the user what is happening first, then untar the files.
    '''

    log.critical('Checking if you have the required files to run:')

    # get the max width of the object name strings
    max_width = 0
    for obj in Mandatory_Data_Objects:
        max_width = max(len(obj), max_width)
    for obj in Optional_Data_Objects:
        max_width = max(len(obj), max_width)

    # if we don't have *all* mandatory objects, we can't run
    have_mandatory_files = True
    for obj in Mandatory_Data_Objects:
        obj_path = os.path.join(Local_Data_Directory, obj)
        if os.path.exists(obj_path):
            log.info('\t%s  found' % obj.ljust(max_width))
        else:
            log.info('\t%s  MISSING AND REQUIRED!' % obj.ljust(max_width))
            have_mandatory_files = False

    # at least *one* of the optional objects must exist
    have_optional_files = False
    for obj in Optional_Data_Objects:
        obj_path = os.path.join(Local_Data_Directory, obj)
        if os.path.exists(obj_path):
            have_optional_files = True
            log.info('\t%s  found' % obj.ljust(max_width))
        else:
            log.info('\t%s  MISSING!' % obj.ljust(max_width))

    if not have_mandatory_files or not have_optional_files:
        log.critical('You must obtain the missing files before you can run '
                     'this validation.')
        return False

    log.critical('You have enough required files to run.')
    log.critical()

    return True


def set_environment():
    '''Modify the environment so we use the local data.'''

    new_inundationhome = os.path.join(Local_Data_Directory, '')
    os.environ['INUNDATIONHOME'] = new_inundationhome
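    # note: os.path.join(Local_Data_Directory, '') just appends the OS path
    # separator, e.g. 'local_data' becomes 'local_data/' on unix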
    new_muxhome = os.path.join(Local_Data_Directory, 'data')
    os.environ['MUXHOME'] = new_muxhome


def run_simulation(vtype, sim_obj):
    '''Run a simulation.

    Return True if all went well, else False.
    '''

    # untar the object
    tar_path = os.path.join(Local_Data_Directory, sim_obj)
    log.info('Untarring %s in directory %s ...'
             % (tar_path, Local_Data_Directory))
    untar_file(tar_path, target_dir=Local_Data_Directory)

    # modify the project.py template
    log.debug("Creating '%s' version of project.py" % vtype)
    fd = open('project_template.py', 'r')
    project = fd.readlines()
    fd.close()

    new_project = []
    for line in project:
        new_project.append(line.replace('#!SETUP!#', vtype.lower()))

    fd = open('project.py', 'w')
    fd.write(''.join(new_project))
    fd.close()
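    # For example, a (hypothetical) template line in project_template.py like
    #     setup = '#!SETUP!#'
    # would be written to project.py as
    #     setup = 'trial'
    # when vtype is 'TRIAL'.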

    # import the new project.py
    import project

    # run the simulation, produce the SWW file
    log.info('Running the simulation ...')
    cmd = 'python run_model.py > %s' % RUNMODEL_STDOUT
    log.debug("run_simulation: doing '%s'" % cmd)
    res = os.system(cmd)
    log.debug("run_simulation: res=%d" % res)

    # 'unimport' project.py
    del project

    # check the result
    if res != 0:
        log.critical('Simulation failed, check the log')

    return res == 0

def check_that_output_is_as_expected(expected_sww, valid_sww):
    '''Check that the validation output is as required.'''

    # get path to the expected SWW file
    log.critical('Checking that simulation results are as expected ...')
    local_sww = os.path.join(Local_Data_Directory, valid_sww)

    # get the output directory from the stdout capture file
    try:
        fd = open(RUNMODEL_STDOUT, 'r')
    except IOError, e:
        log.critical("Can't open catch file '%s': %s"
                     % (RUNMODEL_STDOUT, str(e)))
        return 1
    lines = fd.readlines()
    fd.close()

    output_directory = None
    for line in lines:
        if line.startswith(OUTDIR_PREFIX):
            output_directory = line.replace(OUTDIR_PREFIX, '', 1)
            output_directory = output_directory.strip()
            break
    if output_directory is None:
        log.critical("Couldn't find line starting with '%s' in file '%s'"
                     % (OUTDIR_PREFIX, RUNMODEL_STDOUT))
        return 1

    log.debug('check_that_output_is_as_expected: output_directory=%s'
              % output_directory)

    # compare the SWW files here and there
    new_output_sww = os.path.join(output_directory, expected_sww)
    #cmd = 'python cmpsww.py %s %s > cmpsww.stdout' % (local_sww, new_output_sww)
    cmd = ('python compare_model_timeseries.py %s %s '
           '> compare_model_timeseries.stdout' % (local_sww, new_output_sww))
    log.debug("check_that_output_is_as_expected: doing '%s'" % cmd)
    res = os.system(cmd)
    log.debug("check_that_output_is_as_expected: res=%d" % res)
    log.critical()
    if res == 0:
        log.info('Simulation results are as expected.')
    else:
        log.critical('Simulation results are NOT as expected.')
        fd = open('compare_model_timeseries.stdout', 'r')
        cmp_error = fd.readlines()
        fd.close()
        log.critical(''.join(cmp_error))


def teardown():
    '''Clean up after the validation run.'''

    log.debug('teardown: called')

    # remove remote directory and stdout capture file
    #shutil.rmtree(Remote_Data_Directory, ignore_errors=True)
    #try:
    #    os.remove(RUNMODEL_STDOUT)
    #except OSError:
    #    pass


################################################################################
# Mainline - run the simulation, check the output.
################################################################################

# set the logging levels
log.console_logging_level = log.INFO
log.log_logging_level = log.DEBUG

log.debug("Machine we are running on is '%s'" % get_host_name())
setup()

# prepare the user for what is about to happen
log.critical('''
Please note that this validation test is accurate only on 64-bit Linux or
Windows.  Running the validation on a 32-bit operating system will result in
small differences in the generated mesh, which defeats the simplistic test for
equality between the generated and expected SWW files.

This validation requires a working internet connection to refresh its files.
You may still run this validation without an internet connection if you have
the required files.

If you are behind a proxy server you will need to supply your proxy details,
such as the proxy server address and your proxy username and password.  These
can be defined in one or more of the environment variables:
    HTTP_PROXY
    PROXY_USERNAME
    PROXY_PASSWORD
if you wish.  If not supplied in environment variables, you will be prompted
for the information.
''')
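# For example (hypothetical values only), a proxy could be configured with:
#     export HTTP_PROXY=http://proxy.example.com:8080
#     export PROXY_USERNAME=alice
#     export PROXY_PASSWORD=secret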


# make sure local data is up to date
all_objects = Mandatory_Data_Objects + Optional_Data_Objects
if not refresh_local_data(all_objects, Local_Data_Directory, MIRRORS):
    if not can_we_run():
        log.critical("Can't refresh via the internet and you don't have the "
                     "required files.")
        log.critical('Terminating the validation.')
        log.critical('')
        log.critical('If you get the missing files from %s' % DATA_FILES_URL)
        log.critical('then you can try to run the validation again.  Put the '
                     'files into the directory')
        log.critical("%s." % Local_Data_Directory)
        sys.exit(10)

# now untar the mandatory objects
for obj in Mandatory_Data_Objects:
    tar_path = os.path.join(Local_Data_Directory, obj)
    log.info('Untarring %s in directory %s ...'
             % (tar_path, Local_Data_Directory))
    untar_file(tar_path, target_dir=Local_Data_Directory)

# set the required environment variables
set_environment()

# now run what simulations we can and check the output is as expected
for odo in Optional_Data_Objects:
    start_time = time.time()

    (_, vtype, _) = odo.rsplit('.', 2)
    vtype = vtype.lower()
    log.critical('#' * 72)
    log.critical("Running Patong '%s' validation ..." % vtype)
    if run_simulation(vtype, odo):
        # get the expected and valid SWW names, check they are 'equal'
        (valid_sww, _) = odo.rsplit('.', 1)
        (expected_sww, _) = valid_sww.rsplit('.', 1)
        check_that_output_is_as_expected(expected_sww, valid_sww)

    stop_time = time.time()
    log.critical("'%s' validation took %.1fs\n\n\n"
                 % (vtype, stop_time - start_time))

# clean up
log.critical('Tearing down ...')
teardown()