source: branches/numpy_anuga_validation/automated_validation_tests/patong_beach_validation/validate.py @ 7008

Last change on this file since 7008 was 7008, checked in by rwilson, 15 years ago

Added warning about 32/64 bit differences to instructions.

  • Property svn:executable set to *
File size: 15.1 KB
Line 
1'''
2Automatic verification that the ANUGA code runs the Patong simulation
3and produces the expected output.
4
5Required files are downloaded from the ANUGA servers if they are
6out of date or missing.
7'''
8
9import sys
10import os
11import glob
12import unittest
13import time
14import shutil
15
16from anuga.utilities.system_tools import get_web_file, untar_file, file_length
17import anuga.utilities.log as log
18
19
# sourceforge download mirror hosts (must end with '/')
# try these in turn for each file
## NOTE: It would be more reliable if we could somehow 'poll' Sourceforge
##       for a list of mirrors instead of hard-coding a list here.  The only
##       way to do this at the moment is to 'screen-scrape' the data at
##       http://apps.sourceforge.net/trac/sourceforge/wiki/Mirrors
##       but that only gets the main web page for the entity, not the
##       Sourceforge download mirror server.
MIRRORS = ['http://transact.dl.sourceforge.net/sourceforge/anuga/',       # au
           'http://voxel.dl.sourceforge.net/sourceforge/anuga/',          # us
           'http://superb-west.dl.sourceforge.net/sourceforge/anuga/',    # us
           'http://jaist.dl.sourceforge.net/sourceforge/anuga/',          # jp
           'http://dfn.dl.sourceforge.net/sourceforge/anuga/'             # de
          ]
### for testing
##MIRRORS = ['http://10.7.64.243/patong_validation_data/']       # local linux box

# URL to hand-get data files, if required (shown to the user when the
# automatic refresh fails and the files are missing)
DATA_FILES_URL = ('http://sourceforge.net/project/showfiles.php?'
                  'group_id=172848&package_id=319323&release_id=677531')

# sequence of mandatory local data objects
Mandatory_Data_Objects = ('data.tgz',)

# sequence of optional local data objects.
# these names must be of the form <scene>.sww.<type>.tgz
# as code below depends upon it: the mainline extracts <type>
# (TRIAL/BASIC/FINAL) with rsplit('.', 2) and uses it, lowercased,
# as the validation type.
Optional_Data_Objects = ('patong.sww.TRIAL.tgz',
                         'patong.sww.BASIC.tgz',
                         'patong.sww.FINAL.tgz',
                        )

# path to the local data directory (downloads end up here)
Local_Data_Directory = 'local_data'

# path to the remote data directory (scratch space for fetched digests,
# cleaned out on every refresh)
Remote_Data_Directory = 'remote_data'

# name of stdout catch file for runmodel.py
RUNMODEL_STDOUT = 'runmodel.stdout'

# text at start of 'output dir' line in RUNMODEL_STDOUT file
OUTDIR_PREFIX = 'Make directory '

# Name of SWW file produced by run_model.py
OUTPUT_SWW = 'patong.sww'
66
67
def setup():
    '''Prepare for the validation run.

    Placeholder: intended to check that the required data is set in
    project.py, but currently performs no work.
    '''

    pass
75
76
def refresh_local_data(data_objects, target_dir, mirrors):
    '''Update local data objects from the server.

    data_objects:   list of files to refresh
    target_dir:     directory in which to put the refreshed files
    mirrors:        list of mirror sites to use

    Each file has an associated *.digest file used to decide
    if the local file needs refreshing.

    Return True if all went well, else False.
    '''

    # decision function to decide if a file contains HTML
    def is_html(filename):
        '''Decide if given file contains HTML.

        A failing mirror may return an HTML error page instead of the
        requested file, so look for a DOCTYPE declaration in the first
        1K bytes.
        '''

        fd = open(filename)
        try:
            data = fd.read(1024)
        finally:
            fd.close()

        return 'DOCTYPE' in data

    # local function to get remote file from one of mirrors
    def get_remote_from_mirrors(remote, local, auth, mirrors):
        '''Get 'remote' from one of 'mirrors', put in 'local'.

        Return (True, auth) on success, else (False, auth), where 'auth'
        is the (possibly updated) authentication tuple.
        '''

        # Get a unique date+time string to defeat caching.  The idea is to add
        # this to the end of any URL so proxy sees a different request.
        cache_defeat = '?' + time.strftime('%Y%m%d%H%M%S')

        # try each mirror in turn until one delivers the file
        for mirror in mirrors:
            log.debug('Fetching remote file %s from mirror %s'
                      % (remote, mirror))

            remote_url = mirror + remote + cache_defeat
            (result, auth) = get_web_file(remote_url, local, auth=auth)
            # an HTML response is a mirror error page, not our file
            if result and not is_html(local):
                log.debug('Success fetching file %s' % remote)
                return (True, auth)
            log.debug('Failure fetching from %s' % mirror)

        log.debug('Failure fetching file %s' % remote)
        return (False, auth)

    # local function to compare contents of two files
    def files_same(file_a, file_b):
        '''Return True if the two files have identical contents.'''

        fd = open(file_a, 'r')
        try:
            data_a = fd.read()
        finally:
            fd.close()

        fd = open(file_b, 'r')
        try:
            data_b = fd.read()
        finally:
            fd.close()

        return data_a == data_b

    # local function to update one data object
    def refresh_object(obj, auth, mirrors):
        '''Update object 'obj' using authentication tuple 'auth'.

        Return (True, <updated_auth>) if all went well,
        else (False, <updated_auth>).
        '''

        # create local and remote file paths.
        obj_digest = obj + '.digest'

        remote_file = os.path.join(Remote_Data_Directory, obj)
        remote_digest = remote_file + '.digest'

        local_file = os.path.join(target_dir, obj)
        local_digest = local_file + '.digest'

        # see if missing either digest or object .tgz
        if not os.path.exists(local_digest) or not os.path.exists(local_file):
            # no digest or no object, download both digest and object
            (res, auth) = get_remote_from_mirrors(obj_digest, local_digest,
                                                  auth, mirrors)
            if res:
                (res, auth) = get_remote_from_mirrors(obj, local_file,
                                                      auth, mirrors)
        else:
            # download object digest to remote data directory
            (res, auth) = get_remote_from_mirrors(obj_digest, remote_digest,
                                                  auth, mirrors)
            if res:
                if not files_same(local_digest, remote_digest):
                    # digests differ, refresh object
                    shutil.move(remote_digest, local_digest)
                    (res, auth) = get_remote_from_mirrors(obj, local_file,
                                                          auth, mirrors)

        return (res, auth)

    # create local data directory if required
    # (previously hard-wired to the Local_Data_Directory global; now
    #  honours the target_dir parameter, which callers pass as
    #  Local_Data_Directory anyway, so behaviour is unchanged)
    log.debug('Creating local directory: %s' % target_dir)
    if not os.path.exists(target_dir):
        os.mkdir(target_dir)

    # clean out remote data copy directory
    log.debug('Cleaning remote directory: %s' % Remote_Data_Directory)
    shutil.rmtree(Remote_Data_Directory, ignore_errors=True)
    os.mkdir(Remote_Data_Directory)

    # refresh each local file in turn, remembering any failure
    auth = None
    result = True
    for data_object in data_objects:
        log.info("Refreshing file '%s'" % data_object)
        log.debug('refresh_local_data: getting %s from mirrors, auth=%s'
                  % (data_object, str(auth)))
        (res, auth) = refresh_object(data_object, auth, mirrors)
        log.debug('refresh_local_data: returned (res,auth)=%s,%s'
                  % (str(res), str(auth)))
        if not res:
            log.info('Refresh of file %s failed.' % data_object)
            result = False

    if result:
        log.critical('Local data has been refreshed.')
    else:
        log.critical('Local data has been refreshed, with one or more errors.')
    log.critical()
    return result
207
208
def can_we_run():
    '''Check whether the data files needed for a run are present.

    Reports the status of each mandatory and optional file to the user.
    Return True if we *can* run, else False.
    '''

    log.critical('Checking if you have the required files to run:')

    # width of the longest object name, for aligned report output
    all_objects = Mandatory_Data_Objects + Optional_Data_Objects
    max_width = max([len(name) for name in all_objects] + [0])

    # every mandatory object must exist
    have_mandatory_files = True
    for obj in Mandatory_Data_Objects:
        if os.path.exists(os.path.join(Local_Data_Directory, obj)):
            log.info('\t%s  found' % obj.ljust(max_width))
        else:
            log.info('\t%s  MISSING AND REQUIRED!' % obj.ljust(max_width))
            have_mandatory_files = False

    # at least one optional object must exist
    have_optional_files = False
    for obj in Optional_Data_Objects:
        if os.path.exists(os.path.join(Local_Data_Directory, obj)):
            log.info('\t%s  found' % obj.ljust(max_width))
            have_optional_files = True
        else:
            log.info('\t%s  MISSING!' % obj.ljust(max_width))

    if not (have_mandatory_files and have_optional_files):
        log.critical('You must obtain the missing files before you can run '
                     'this validation.')
        return False

    log.critical('You have enough required files to run.')
    log.critical()

    return True
255
256
def set_environment():
    '''Point the simulation at the local data via environment variables.'''

    # trailing '' gives the path a trailing separator
    os.environ['INUNDATIONHOME'] = os.path.join(Local_Data_Directory, '')
    os.environ['MUXHOME'] = os.path.join(Local_Data_Directory, 'data')
263
264
def run_simulation(vtype, sim_obj):
    '''Run a simulation.

    vtype:    validation type string ('trial', 'basic' or 'final')
    sim_obj:  name of the data tarfile for this simulation

    Returns True if all went well, else False.
    '''

    # untar the object
    tar_path = os.path.join(Local_Data_Directory, sim_obj)
    log.info('Untarring %s in directory %s ...'
             % (tar_path, Local_Data_Directory))
    untar_file(tar_path, target_dir=Local_Data_Directory)

    # create project.py from the template, substituting the validation type
    log.debug("Creating '%s' version of project.py" % vtype)
    fd = open('project.py.template', 'r')
    try:
        template_lines = fd.readlines()
    finally:
        fd.close()

    new_project = [line.replace('#!SETUP!#', vtype.lower())
                   for line in template_lines]

    fd = open('project.py', 'w')
    try:
        fd.write(''.join(new_project))
    finally:
        fd.close()

    # import new project.py
    import project

    # run the simulation, produce SWW file
    log.info('Running the simulation ...')
    cmd = 'python run_model.py > %s' % RUNMODEL_STDOUT
    log.debug("run_simulation: doing '%s'" % cmd)
    res = os.system(cmd)
    log.debug("run_simulation: res=%d" % res)

    # drop our reference to project.py
    # NOTE: this only unbinds the local name; the module stays cached in
    #       sys.modules, so a later in-process 'import project' would NOT
    #       re-read a regenerated file.  The simulation itself runs in a
    #       subprocess, so it always sees the freshly written project.py.
    del project

    # check result
    if res != 0:
        log.critical('Simulation failed, check log')

    return res == 0
309
def check_that_output_is_as_expected(expected_sww, valid_sww):
    '''Check that validation output is as required.

    expected_sww:  name of the SWW file the simulation should produce
    valid_sww:     name of the known-good SWW file in the local data dir

    Returns 1 on error opening/parsing the stdout capture file; otherwise
    returns None and reports the comparison result via the log only.
    '''

    # get path to the known-good SWW file
    log.critical('Checking that simulation results are as expected ...')
    local_sww = os.path.join(Local_Data_Directory, valid_sww)

    # get output directory from stdout capture file
    try:
        fd = open(RUNMODEL_STDOUT, 'r')
    # 'as' form works on python 2.6+ and 3.x (old ',' form is 2.x only)
    except IOError as e:
        log.critical("Can't open catch file '%s': %s"
                     % (RUNMODEL_STDOUT, str(e)))
        return 1
    try:
        lines = fd.readlines()
    finally:
        fd.close()      # BUG FIX: was 'fd.close' (attribute access, no call)

    # find the line announcing the output directory
    output_directory = None
    for line in lines:
        if line.startswith(OUTDIR_PREFIX):
            output_directory = line.replace(OUTDIR_PREFIX, '', 1)
            output_directory = output_directory.strip('\n')
            break
    if output_directory is None:
        log.critical("Couldn't find line starting with '%s' in file '%s'"
                     % (OUTDIR_PREFIX, RUNMODEL_STDOUT))
        return 1

    log.debug('check_that_output_is_as_expected: output_directory=%s'
              % output_directory)

    # compare the generated SWW file against the known-good one
    new_output_sww = os.path.join(output_directory, expected_sww)
    cmd = 'python cmpsww.py %s %s > cmpsww.stdout' % (local_sww, new_output_sww)
    log.debug("check_that_output_is_as_expected: doing '%s'" % cmd)
    res = os.system(cmd)
    log.debug("check_that_output_is_as_expected: res=%d" % res)
    log.critical()
    if res == 0:
        log.info('Simulation results are as expected.')
    else:
        log.critical('Simulation results are NOT as expected.')
        fd = open('cmpsww.stdout', 'r')
        try:
            cmp_error = fd.readlines()
        finally:
            fd.close()
        log.critical(''.join(cmp_error))
356
357
def teardown():
    '''Clean up after validation run.'''

    log.debug('teardown: called')

    # the stdout capture file may never have been created
    try:
        os.remove(RUNMODEL_STDOUT)
    except OSError:
        pass

    # the remote copy directory is pure scratch space
    shutil.rmtree(Remote_Data_Directory, ignore_errors=True)
369           
370
################################################################################
# Mainline - run the simulation, check output.
################################################################################

# set logging levels: terse on the console, verbose in the log file
log.console_logging_level = log.INFO
log.log_logging_level = log.DEBUG
log_filename = log.log_filename

setup()

# prepare user for what is about to happen

msg = '''
Please note that this validation test is accurate only on 64bit Linux or
Windows.  Running the validation on a 32bit operating system will result in
small differences in the generated mesh which defeats the simplistic test for
equality between the generated and expected SWW files.

This validation requires a working internet connection to refresh its files.
You may still run this validation without an internet connection if you have the
required files.

If you are behind a proxy server you will need to supply your proxy details
such as the proxy server address and your proxy username and password.  These
can be defined in one or more of the environment variables:
    HTTP_PROXY
    PROXY_USERNAME
    PROXY_PASSWORD
if you wish.  If not supplied in environment variables you will be prompted for
the information.
'''

log.critical(msg)

# make sure local data is up to date
all_objects = Mandatory_Data_Objects + Optional_Data_Objects
if not refresh_local_data(all_objects, Local_Data_Directory, MIRRORS):
    # refresh failed; we can still run if the files are already present
    if not can_we_run():
        log.critical("Can't refresh via the internet and you don't have the "
                     "required files.")
        log.critical('Terminating the validation.')
        log.critical('')
        log.critical('If you get the missing files from %s' % DATA_FILES_URL)
        log.critical('then you can try to run the validation again.  Put the '
                     'files into the directory')
        log.critical("%s." % Local_Data_Directory)
        sys.exit(10)

# now untar mandatory objects
for obj in Mandatory_Data_Objects:
    tar_path = os.path.join(Local_Data_Directory, obj)
    log.info('Untarring %s in directory %s ...'
             % (tar_path, Local_Data_Directory))
    untar_file(tar_path, target_dir=Local_Data_Directory)

# set required environment variables
set_environment()

# now run what simulations we can and check output is as expected
for odo in Optional_Data_Objects:
    start_time = time.time()
    # e.g. 'patong.sww.TRIAL.tgz' -> vtype 'trial'
    (_, vtype, _) = odo.rsplit('.', 2)
    vtype = vtype.lower()
    log.critical('#' * 72)
    log.critical("Running Patong '%s' validation ..." % vtype)
    if run_simulation(vtype, odo):
        # get SWW names expected and valid, check 'equal'
        # e.g. 'patong.sww.TRIAL.tgz' -> valid 'patong.sww.TRIAL',
        #      expected 'patong.sww'
        (valid_sww, _) = odo.rsplit('.', 1)
        (expected_sww, _) = valid_sww.rsplit('.', 1)
        check_that_output_is_as_expected(expected_sww, valid_sww)
    # keep a per-validation copy of the log (even if the simulation failed)
    shutil.move(log_filename, '%s.%s' % (log_filename, vtype))
    stop_time = time.time()
    log.critical("'%s' validation took %.1fs" % (vtype, stop_time - start_time))

# clean up
teardown()
Note: See TracBrowser for help on using the repository browser.