source: branches/numpy_anuga_validation/automated_validation_tests/patong_beach_validation/validate.py @ 7192

Last change on this file since 7192 was 7107, checked in by rwilson, 15 years ago

Made proxy authentication less likely to lock out user on errors.

  • Property svn:executable set to *
File size: 15.1 KB
Line 
1'''
2Automatic verification that the ANUGA code runs the Patong simulation
3and produces the expected output.
4
5Required files are downloaded from the ANUGA servers if they are
6out of date or missing.
7'''
8
9import sys
10import os
11import glob
12import unittest
13import time
14import shutil
15
16from anuga.utilities.system_tools import get_web_file, untar_file, file_length
17import anuga.utilities.log as log
18
19
# Sourceforge download mirror hosts (must end with '/').
# They are tried in turn for each file fetched.
## NOTE: It would be more reliable if we could somehow 'poll' Sourceforge
##       for a list of mirrors instead of hard-coding a list here.  The only
##       way to do this at the moment is to 'screen-scrape' the data at
##       http://apps.sourceforge.net/trac/sourceforge/wiki/Mirrors
##       but that only gets the main web page for the entity, not the
##       Sourceforge download mirror server.
MIRRORS = ['http://transact.dl.sourceforge.net/sourceforge/anuga/',       # au
           'http://voxel.dl.sourceforge.net/sourceforge/anuga/',          # us
           'http://superb-west.dl.sourceforge.net/sourceforge/anuga/',    # us
           'http://jaist.dl.sourceforge.net/sourceforge/anuga/',          # jp
           'http://dfn.dl.sourceforge.net/sourceforge/anuga/'             # de
          ]

### for testing
##MIRRORS = ['http://10.7.64.243/patong_validation_data/']       # local linux box

# URL telling the user where to hand-download the data files, if required
DATA_FILES_URL = ('http://sourceforge.net/project/showfiles.php?'
                  'group_id=172848&package_id=319323&release_id=677531')

# local data objects that must always be present
Mandatory_Data_Objects = ('data.tgz',)

# optional local data objects
# these names MUST have the form <scene>.sww.<type>.tgz,
# as the mainline code below parses them with rsplit()
Optional_Data_Objects = ('patong.sww.TRIAL.tgz',
                         'patong.sww.BASIC.tgz',
                         'patong.sww.FINAL.tgz')

# path to the local data directory
Local_Data_Directory = 'local_data'

# path to the remote data directory (scratch area for freshly fetched digests)
Remote_Data_Directory = 'remote_data'

# name of the file capturing stdout from runmodel.py
RUNMODEL_STDOUT = 'runmodel.stdout'

# text at start of the 'output dir' line in the RUNMODEL_STDOUT file
OUTDIR_PREFIX = 'Make directory '

# name of the SWW file produced by run_model.py
OUTPUT_SWW = 'patong.sww'
68
69
def setup():
    '''Prepare for the validation run.

    Currently nothing is required; this is a hook for future
    pre-run initialisation.
    '''
74
75
def refresh_local_data(data_objects, target_dir, mirrors):
    '''Update local data objects from the server.

    data_objects:   list of files to refresh
    target_dir:     directory in which to put files
    mirrors:        list of mirror sites to use

    Each file has an associated *.digest file used to decide
    if the local file needs refreshing.

    Return True if all went well, else False.
    '''

    # decision function to decide if a file contains HTML
    def is_html(filename):
        '''Decide if given file contains HTML.

        A mirror that can't serve a file often returns an HTML error
        page instead, so a "successful" download may still be junk.
        '''

        fd = open(filename)
        try:
            data = fd.read(1024)
        finally:
            fd.close()

        return 'DOCTYPE' in data

    # local function to get remote file from one of mirrors
    def get_remote_from_mirrors(remote, local, auth, mirrors):
        '''Get 'remote' from one of 'mirrors', put in 'local'.

        Return (True, auth) on success, else (False, auth), where 'auth'
        is the possibly updated proxy authentication tuple.
        '''

        # Get a unique date+time string to defeat caching.  The idea is to add
        # this to the end of any URL so proxy sees a different request.
        cache_defeat = '?' + time.strftime('%Y%m%d%H%M%S')

        # try each mirror when getting file
        for mirror in mirrors:
            log.debug('Fetching remote file %s from mirror %s'
                      % (remote, mirror))

            remote_url = mirror + remote + cache_defeat
            (result, auth) = get_web_file(remote_url, local, auth=auth)
            # a fetch that returned an HTML page is really a mirror failure
            if result and not is_html(local):
                log.debug('Success fetching file %s' % remote)
                return (True, auth)
            log.debug('Failure fetching from %s' % mirror)
            # don't reuse possibly bad 'auth' - some proxies lock the
            # user out after repeated failures
            auth = None

        log.debug('Failure fetching file %s' % remote)
        return (False, auth)

    # local function to compare contents of two files
    def files_same(file_a, file_b):
        '''Return True if files 'file_a' and 'file_b' have identical contents.'''

        fd = open(file_a, 'r')
        try:
            data_a = fd.read()
        finally:
            fd.close()

        fd = open(file_b, 'r')
        try:
            data_b = fd.read()
        finally:
            fd.close()

        return data_a == data_b

    # local function to update one data object
    def refresh_object(obj, auth, mirrors):
        '''Update object 'obj' using authentication tuple 'auth'.

        Return (True, <updated_auth>) if all went well,
        else (False, <updated_auth>).
        '''

        # create local and remote file paths.
        obj_digest = obj + '.digest'

        remote_file = os.path.join(Remote_Data_Directory, obj)
        remote_digest = remote_file + '.digest'

        # NOTE: was hard-coded to the Local_Data_Directory global even
        # though a 'target_dir' parameter was supplied; now honours the
        # parameter (the only caller passes the same directory).
        local_file = os.path.join(target_dir, obj)
        local_digest = local_file + '.digest'

        # see if missing either digest or object .tgz
        if not os.path.exists(local_digest) or not os.path.exists(local_file):
            # no digest or no object, download both digest and object
            (res, auth) = get_remote_from_mirrors(obj_digest, local_digest,
                                                  auth, mirrors)
            if res:
                (res, auth) = get_remote_from_mirrors(obj, local_file,
                                                      auth, mirrors)
        else:
            # download object digest to remote data directory
            (res, auth) = get_remote_from_mirrors(obj_digest, remote_digest,
                                                  auth, mirrors)
            if res:
                if not files_same(local_digest, remote_digest):
                    # digests differ, refresh object
                    shutil.move(remote_digest, local_digest)
                    (res, auth) = get_remote_from_mirrors(obj, local_file,
                                                          auth, mirrors)

        return (res, auth)

    # create local data directory if required
    log.debug('Creating local directory: %s' % target_dir)
    if not os.path.exists(target_dir):
        os.mkdir(target_dir)

    # clean out remote data copy directory
    log.debug('Cleaning remote directory: %s' % Remote_Data_Directory)
    shutil.rmtree(Remote_Data_Directory, ignore_errors=True)
    os.mkdir(Remote_Data_Directory)

    # refresh each local file in turn
    auth = None
    result = True
    for data_object in data_objects:
        log.info("Refreshing file '%s'" % data_object)
        log.debug('refresh_local_data: getting %s from mirrors, auth=%s'
                  % (data_object, str(auth)))
        (res, auth) = refresh_object(data_object, auth, mirrors)
        log.debug('refresh_local_data: returned (res,auth)=%s,%s'
                  % (str(res), str(auth)))
        if not res:
            log.info('Refresh of file %s failed.' % data_object)
            result = False
            # don't use possibly bad 'auth' again,
            # some proxies lock out on repeated failures.
            auth = None

    if result:
        log.critical('Local data has been refreshed.')
    else:
        log.critical('Local data has been refreshed, with one or more errors.')
    log.critical()
    return result
210
211
def can_we_run():
    '''Decide if we can run with the files we have.

    Return True if we *can* run, else False.

    Tell user what is happening first, then untar files.
    '''

    log.critical('Checking if you have the required files to run:')

    # width of the longest object name, for aligned report output
    # ([0] seed keeps max() safe even if both tuples were empty)
    all_objects = Mandatory_Data_Objects + Optional_Data_Objects
    max_width = max([0] + [len(obj) for obj in all_objects])

    # if we don't have *all* mandatory objects, can't run
    have_mandatory_files = True
    for obj in Mandatory_Data_Objects:
        obj_path = os.path.join(Local_Data_Directory, obj)
        if os.path.exists(obj_path):
            log.info('\t%s  found' % obj.ljust(max_width))
        else:
            log.info('\t%s  MISSING AND REQUIRED!' % obj.ljust(max_width))
            have_mandatory_files = False

    # at least *one* of the optional objects must exist
    have_optional_files = False
    for obj in Optional_Data_Objects:
        obj_path = os.path.join(Local_Data_Directory, obj)
        if os.path.exists(obj_path):
            have_optional_files = True
            log.info('\t%s  found' % obj.ljust(max_width))
        else:
            log.info('\t%s  MISSING!' % obj.ljust(max_width))

    if not have_mandatory_files or not have_optional_files:
        log.critical('You must obtain the missing files before you can run '
                     'this validation.')
        return False

    log.critical('You have enough required files to run.')
    log.critical()

    return True
258
259
def set_environment():
    '''Modify the environment so we use the local data.'''

    # joining with '' appends the OS path separator to the directory name
    os.environ['INUNDATIONHOME'] = os.path.join(Local_Data_Directory, '')
    os.environ['MUXHOME'] = os.path.join(Local_Data_Directory, 'data')
266
267
def run_simulation(vtype, sim_obj):
    '''Run a simulation.

    vtype:    the validation type string substituted into project.py
    sim_obj:  name of the simulation data tarfile in Local_Data_Directory

    Returns True if all went well, else False.
    '''

    # untar the object
    tar_path = os.path.join(Local_Data_Directory, sim_obj)
    log.info('Untarring %s in directory %s ...'
             % (tar_path, Local_Data_Directory))
    untar_file(tar_path, target_dir=Local_Data_Directory)

    # modify project.py template, replacing the '#!SETUP!#' marker
    # with the lower-cased validation type
    log.debug("Creating '%s' version of project.py" % vtype)
    fd = open('project_template.py', 'r')
    try:
        template_lines = fd.readlines()
    finally:
        fd.close()

    new_project = [line.replace('#!SETUP!#', vtype.lower())
                   for line in template_lines]

    fd = open('project.py', 'w')
    try:
        fd.write(''.join(new_project))
    finally:
        fd.close()

    # import new project.py
    import project

    # run the simulation, produce SWW file
    log.info('Running the simulation ...')
    cmd = 'python run_model.py > %s' % RUNMODEL_STDOUT
    log.debug("run_simulation: doing '%s'" % cmd)
    res = os.system(cmd)
    log.debug("run_simulation: res=%d" % res)

    # drop our reference to project
    # NOTE: 'del' only removes the local name; the module remains cached
    # in sys.modules, so a later 'import project' in this process will
    # NOT re-read a rewritten project.py.  run_model.py runs in its own
    # process (os.system above), so it always sees the fresh file.
    del project

    # check result
    if res != 0:
        log.critical('Simulation failed, check log')

    return res == 0
312
def check_that_output_is_as_expected(expected_sww, valid_sww):
    '''Check that validation output is as required.

    expected_sww:  name of the SWW file the simulation should have produced
    valid_sww:     name of the known-good SWW file to compare against

    Returns 1 on error, else None; the comparison outcome itself is
    only logged.
    '''

    # get path to expected SWW file
    log.critical('Checking that simulation results are as expected ...')
    local_sww = os.path.join(Local_Data_Directory, valid_sww)

    # get output directory from stdout capture file
    try:
        fd = open(RUNMODEL_STDOUT, 'r')
    except IOError as e:
        log.critical("Can't open catch file '%s': %s"
                     % (RUNMODEL_STDOUT, str(e)))
        return 1
    try:
        lines = fd.readlines()
    finally:
        fd.close()      # BUGFIX: was 'fd.close' (no call) - never closed

    # find the line naming the simulation output directory
    output_directory = None
    for line in lines:
        if line.startswith(OUTDIR_PREFIX):
            output_directory = line.replace(OUTDIR_PREFIX, '', 1)
            output_directory = output_directory.strip('\n')
            break
    if output_directory is None:
        log.critical("Couldn't find line starting with '%s' in file '%s'"
                     % (OUTDIR_PREFIX, RUNMODEL_STDOUT))
        return 1

    log.debug('check_that_output_is_as_expected: output_directory=%s'
              % output_directory)

    # compare SWW files here and there
    new_output_sww = os.path.join(output_directory, expected_sww)
    cmd = 'python cmpsww.py %s %s > cmpsww.stdout' % (local_sww, new_output_sww)
    log.debug("check_that_output_is_as_expected: doing '%s'" % cmd)
    res = os.system(cmd)
    log.debug("check_that_output_is_as_expected: res=%d" % res)
    log.critical()
    if res == 0:
        log.info('Simulation results are as expected.')
    else:
        log.critical('Simulation results are NOT as expected.')
        fd = open('cmpsww.stdout', 'r')
        try:
            cmp_error = fd.readlines()
        finally:
            fd.close()
        log.critical(''.join(cmp_error))
359
360
def teardown():
    '''Clean up after validation run.'''

    log.debug('teardown: called')

    # discard the remote data copy and the stdout capture file
    shutil.rmtree(Remote_Data_Directory, ignore_errors=True)
    try:
        os.remove(RUNMODEL_STDOUT)
    except OSError:
        # capture file was never created - nothing to remove
        pass
372           
373
################################################################################
# Mainline - run the simulation, check output.
################################################################################

# set logging levels: terse console, verbose log file
log.console_logging_level = log.INFO
log.log_logging_level = log.DEBUG

setup()

# prepare user for what is about to happen
log.critical('''
Please note that this validation test is accurate only on 64bit Linux or
Windows.  Running the validation on a 32bit operating system will result in
small differences in the generated mesh which defeats the simplistic test for
equality between the generated and expected SWW files.

This validation requires a working internet connection to refresh its files.
You may still run this validation without an internet connection if you have the
required files.

If you are behind a proxy server you will need to supply your proxy details
such as the proxy server address and your proxy username and password.  These
can be defined in one or more of the environment variables:
    HTTP_PROXY
    PROXY_USERNAME
    PROXY_PASSWORD
if you wish.  If not supplied in environment variables you will be prompted for
the information.
''')


# make sure local data is up to date; if we can't refresh it, we may
# still be able to run with the files already on disk
all_objects = Mandatory_Data_Objects + Optional_Data_Objects
if not refresh_local_data(all_objects, Local_Data_Directory, MIRRORS):
    if not can_we_run():
        log.critical("Can't refresh via the internet and you don't have the "
                     "required files.")
        log.critical('Terminating the validation.')
        log.critical('')
        log.critical('If you get the missing files from %s' % DATA_FILES_URL)
        log.critical('then you can try to run the validation again.  Put the '
                     'files into the directory')
        log.critical("%s." % Local_Data_Directory)
        sys.exit(10)

# now untar mandatory objects
for m_obj in Mandatory_Data_Objects:
    m_tar_path = os.path.join(Local_Data_Directory, m_obj)
    log.info('Untarring %s in directory %s ...'
             % (m_tar_path, Local_Data_Directory))
    untar_file(m_tar_path, target_dir=Local_Data_Directory)

# set required environment variables
set_environment()

# run whatever simulations we have data for, checking each output
for data_obj in Optional_Data_Objects:
    start_time = time.time()

    # object names have the form <scene>.sww.<TYPE>.tgz
    (_, vtype, _) = data_obj.rsplit('.', 2)
    vtype = vtype.lower()
    log.critical('#' * 72)
    log.critical("Running Patong '%s' validation ..." % vtype)
    if run_simulation(vtype, data_obj):
        # get SWW names expected and valid, check 'equal'
        (valid_sww, _) = data_obj.rsplit('.', 1)
        (expected_sww, _) = valid_sww.rsplit('.', 1)
        check_that_output_is_as_expected(expected_sww, valid_sww)

    stop_time = time.time()
    log.critical("'%s' validation took %.1fs\n\n\n"
                 % (vtype, stop_time - start_time))

# clean up
log.critical('Tearing down ...')
teardown()
Note: See TracBrowser for help on using the repository browser.