source: anuga_validation/automated_validation_tests/patong_beach_validation/validate.py @ 7544

Last change on this file since 7544 was 7544, checked in by rwilson, 14 years ago

Dump machine name to the log file so we know what machine the validation was run on.

  • Property svn:executable set to *
File size: 15.2 KB
'''
Automatic verification that the ANUGA code runs the Patong simulation
and produces the expected output.

Required files are downloaded from the ANUGA servers if they are
out of date or missing.
'''

import sys
import os
import glob
import unittest
import time
import shutil

from anuga.utilities.system_tools \
     import get_web_file, untar_file, file_length, get_host_name
import anuga.utilities.log as log


# sourceforge download mirror hosts (must end with '/')
# try these in turn for each file
## NOTE: It would be more reliable if we could somehow 'poll' Sourceforge
##       for a list of mirrors instead of hard-coding a list here.  The only
##       way to do this at the moment is to 'screen-scrape' the data at
##       http://apps.sourceforge.net/trac/sourceforge/wiki/Mirrors
##       but that only gets the main web page for the entity, not the
##       Sourceforge download mirror server.
MIRRORS = ['http://transact.dl.sourceforge.net/sourceforge/anuga/',       # au
           'http://voxel.dl.sourceforge.net/sourceforge/anuga/',          # us
           'http://superb-west.dl.sourceforge.net/sourceforge/anuga/',    # us
           'http://jaist.dl.sourceforge.net/sourceforge/anuga/',          # jp
           'http://dfn.dl.sourceforge.net/sourceforge/anuga/'             # de
          ]

### for testing
##MIRRORS = ['http://10.7.64.243/patong_validation_data/']       # local linux box

# URL to hand-get data files, if required
DATA_FILES_URL = ('http://sourceforge.net/project/showfiles.php?'
                  'group_id=172848&package_id=319323&release_id=677531')

# sequence of mandatory local data objects
Mandatory_Data_Objects = ('data.tgz',)

# sequence of optional local data objects.
# these names must be of the form <scene>.sww.<type>.tgz
# as the code below depends on it.
Optional_Data_Objects = (
                         'patong.sww.TRIAL.tgz',
                         'patong.sww.BASIC.tgz',
                         'patong.sww.FINAL.tgz'
                        )
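# Example: 'patong.sww.TRIAL.tgz' splits via rsplit('.', 2) into
# ['patong.sww', 'TRIAL', 'tgz']; the mainline below relies on this layout.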

# path to the local data directory
Local_Data_Directory = 'local_data'

# path to the remote data directory
Remote_Data_Directory = 'remote_data'

# name of the stdout catch file for run_model.py
RUNMODEL_STDOUT = 'runmodel.stdout'

# text at start of the 'output dir' line in the RUNMODEL_STDOUT file
OUTDIR_PREFIX = 'Make directory '

# name of the SWW file produced by run_model.py
OUTPUT_SWW = 'patong.sww'


def setup():
    '''Prepare for the validation run.'''

    pass


def refresh_local_data(data_objects, target_dir, mirrors):
    '''Update local data objects from the server.

    data_objects:   list of files to refresh
    target_dir:     directory in which to put files
    mirrors:        list of mirror sites to use

    Each file has an associated *.digest file used to decide
    if the local file needs refreshing.

    Return True if all went well, else False.
    '''

    # decision function to decide if a file contains HTML
    def is_html(filename):
        '''Decide if the given file contains HTML.'''

        fd = open(filename)
        data = fd.read(1024)
        fd.close()

        return 'DOCTYPE' in data


    # local function to get a remote file from one of the mirrors
    def get_remote_from_mirrors(remote, local, auth, mirrors):
        '''Get 'remote' from one of 'mirrors', put in 'local'.'''

        # Get a unique date+time string to defeat caching.  The idea is to add
        # this to the end of any URL so the proxy sees a different request.
        cache_defeat = '?' + time.strftime('%Y%m%d%H%M%S')
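        # e.g. fetching 'data.tgz' at 2010-04-01 12:30:00 requests
        # 'http://<mirror>/data.tgz?20100401123000' (illustrative timestamp)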
111
112        # try each mirror when getting file
113        for mirror in mirrors:
114            log.debug('Fetching remote file %s from mirror %s'
115                      % (remote, mirror))
116
117            remote_url = mirror + remote + cache_defeat
118            (result, auth) = get_web_file(remote_url, local, auth=auth)
119            if result and is_html(local)==False:
120                log.debug('Success fetching file %s' % remote)
121                return (True, auth)
122            log.debug('Failure fetching from %s' % mirror)
123            auth = None
124
125        log.debug('Failure fetching file %s' % remote)
126        return (False, auth)           
127               

    # local function to compare the contents of two files
    def files_same(file_a, file_b):
        '''Compare two files to see if their contents are the same.'''

        fd = open(file_a, 'r')
        data_a = fd.read()
        fd.close()

        fd = open(file_b, 'r')
        data_b = fd.read()
        fd.close()

        return data_a == data_b
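
    # NOTE: files_same reads both files fully into memory, which is fine for
    # the small .digest files it is used on; filecmp.cmp() from the standard
    # library would suit larger files.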


    # local function to update one data object
    def refresh_object(obj, auth, mirrors):
        '''Update object 'obj' using authentication tuple 'auth'.

        Return (True, <updated_auth>) if all went well,
        else (False, <updated_auth>).
        '''

        # create local and remote file paths
        obj_digest = obj + '.digest'

        remote_file = os.path.join(Remote_Data_Directory, obj)
        remote_digest = remote_file + '.digest'

        local_file = os.path.join(Local_Data_Directory, obj)
        local_digest = local_file + '.digest'

        # see if we are missing either the digest or the object .tgz
        if not os.path.exists(local_digest) or not os.path.exists(local_file):
            # no digest or no object, download both digest and object
            (res, auth) = get_remote_from_mirrors(obj_digest, local_digest, auth, mirrors)
            if res:
                (res, auth) = get_remote_from_mirrors(obj, local_file, auth, mirrors)
        else:
            # download the object digest to the remote data directory
            (res, auth) = get_remote_from_mirrors(obj_digest, remote_digest, auth, mirrors)
            if res:
                if not files_same(local_digest, remote_digest):
                    # digests differ, refresh the object
                    shutil.move(remote_digest, local_digest)
                    (res, auth) = get_remote_from_mirrors(obj, local_file, auth, mirrors)

        return (res, auth)
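
    ## NOTE: this script only compares .digest files byte-for-byte and does
    ## not define how they are generated.  A minimal sketch of creating one,
    ## ASSUMING (not confirmed here) that a digest is the MD5 hex digest of
    ## the data object:
    ##
    ##     import hashlib
    ##     def make_digest(filename):
    ##         fd = open(filename, 'rb')
    ##         hex_digest = hashlib.md5(fd.read()).hexdigest()
    ##         fd.close()
    ##         fd = open(filename + '.digest', 'w')
    ##         fd.write(hex_digest)
    ##         fd.close()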

    # create the local data directory if required
    log.debug('Creating local directory: %s' % Local_Data_Directory)
    if not os.path.exists(Local_Data_Directory):
        os.mkdir(Local_Data_Directory)

    # clean out the remote data copy directory
    log.debug('Cleaning remote directory: %s' % Remote_Data_Directory)
    shutil.rmtree(Remote_Data_Directory, ignore_errors=True)
    os.mkdir(Remote_Data_Directory)

    # refresh the local files
    auth = None
    result = True
    for data_object in data_objects:
        log.info("Refreshing file '%s'" % data_object)
        log.debug('refresh_local_data: getting %s from mirrors, auth=%s'
                  % (data_object, str(auth)))
        (res, auth) = refresh_object(data_object, auth, mirrors)
        log.debug('refresh_local_data: returned (res,auth)=%s,%s'
                  % (str(res), str(auth)))
        if not res:
            log.info('Refresh of file %s failed.' % data_object)
            result = False
            # don't reuse a possibly bad 'auth',
            # some proxies lock out on repeated failures
            auth = None

    if result:
        log.critical('Local data has been refreshed.')
    else:
        log.critical('Local data has been refreshed, with one or more errors.')
    log.critical()
    return result


def can_we_run():
    '''Decide if we can run with the files we have.

    Return True if we *can* run, else False.

    Tell the user which files are found and which are missing.
    '''

    log.critical('Checking if you have the required files to run:')

    # get the max width of the object name strings
    max_width = 0
    for obj in Mandatory_Data_Objects:
        max_width = max(len(obj), max_width)
    for obj in Optional_Data_Objects:
        max_width = max(len(obj), max_width)

    # if we don't have *all* mandatory objects, we can't run
    have_mandatory_files = True
    for obj in Mandatory_Data_Objects:
        obj_path = os.path.join(Local_Data_Directory, obj)
        if os.path.exists(obj_path):
            log.info('\t%s  found' % obj.ljust(max_width))
        else:
            log.info('\t%s  MISSING AND REQUIRED!' % obj.ljust(max_width))
            have_mandatory_files = False

    # at least *one* of the optional objects must exist
    have_optional_files = False
    for obj in Optional_Data_Objects:
        obj_path = os.path.join(Local_Data_Directory, obj)
        if os.path.exists(obj_path):
            have_optional_files = True
            log.info('\t%s  found' % obj.ljust(max_width))
        else:
            log.info('\t%s  MISSING!' % obj.ljust(max_width))

    if not have_mandatory_files or not have_optional_files:
        log.critical('You must obtain the missing files before you can run '
                     'this validation.')
        return False

    log.critical('You have enough required files to run.')
    log.critical()

    return True


def set_environment():
    '''Modify the environment so we use the local data.'''

    new_inundationhome = os.path.join(Local_Data_Directory, '')
    os.environ['INUNDATIONHOME'] = new_inundationhome
    new_muxhome = os.path.join(Local_Data_Directory, 'data')
    os.environ['MUXHOME'] = new_muxhome
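    # e.g. on Linux this sets INUNDATIONHOME='local_data/' and
    # MUXHOME='local_data/data', both relative to the current directory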


def run_simulation(vtype, sim_obj):
    '''Run a simulation.

    Returns True if all went well, else False.
    '''

    # untar the object
    tar_path = os.path.join(Local_Data_Directory, sim_obj)
    log.info('Untarring %s in directory %s ...'
             % (tar_path, Local_Data_Directory))
    untar_file(tar_path, target_dir=Local_Data_Directory)

    # modify the project.py template
    log.debug("Creating '%s' version of project.py" % vtype)
    fd = open('project_template.py', 'r')
    project = fd.readlines()
    fd.close()

    new_project = []
    for line in project:
        new_project.append(line.replace('#!SETUP!#', vtype.lower()))
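    # e.g. a hypothetical template line "setup = '#!SETUP!#'" becomes
    # "setup = 'trial'" for the TRIAL run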

    fd = open('project.py', 'w')
    fd.write(''.join(new_project))
    fd.close()

    # import the new project.py
    import project

    # run the simulation, produce the SWW file
    log.info('Running the simulation ...')
    cmd = 'python run_model.py > %s' % RUNMODEL_STDOUT
    log.debug("run_simulation: doing '%s'" % cmd)
    res = os.system(cmd)
    log.debug("run_simulation: res=%d" % res)
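
    # NOTE: on Unix os.system() returns the raw wait status (the exit code
    # shifted left 8 bits), so only res == 0 reliably indicates success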

    # 'unimport' project.py (the module object remains in sys.modules;
    # only the local name is released)
    del project

    # check the result
    if res != 0:
        log.critical('Simulation failed, check the log')

    return res == 0


def check_that_output_is_as_expected(expected_sww, valid_sww):
    '''Check that the validation output is as required.'''

    # get the path to the expected SWW file
    log.critical('Checking that simulation results are as expected ...')
    local_sww = os.path.join(Local_Data_Directory, valid_sww)

    # get the output directory from the stdout capture file
    try:
        fd = open(RUNMODEL_STDOUT, 'r')
    except IOError, e:
        log.critical("Can't open catch file '%s': %s"
                     % (RUNMODEL_STDOUT, str(e)))
        return 1
    lines = fd.readlines()
    fd.close()

    output_directory = None
    for line in lines:
        if line.startswith(OUTDIR_PREFIX):
            output_directory = line.replace(OUTDIR_PREFIX, '', 1)
            output_directory = output_directory.strip('\n')
            break
    if output_directory is None:
        log.critical("Couldn't find line starting with '%s' in file '%s'"
                     % (OUTDIR_PREFIX, RUNMODEL_STDOUT))
        return 1
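
    # e.g. a captured line 'Make directory /tmp/patong_output\n' yields
    # output_directory='/tmp/patong_output' (illustrative path)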

    log.debug('check_that_output_is_as_expected: output_directory=%s'
              % output_directory)

    # compare the expected (local) and newly generated SWW files
    new_output_sww = os.path.join(output_directory, expected_sww)
    cmd = 'python cmpsww.py %s %s > cmpsww.stdout' % (local_sww, new_output_sww)
    log.debug("check_that_output_is_as_expected: doing '%s'" % cmd)
    res = os.system(cmd)
    log.debug("check_that_output_is_as_expected: res=%d" % res)
    log.critical()
    if res == 0:
        log.info('Simulation results are as expected.')
    else:
        log.critical('Simulation results are NOT as expected.')
        fd = open('cmpsww.stdout', 'r')
        cmp_error = fd.readlines()
        fd.close()
        log.critical(''.join(cmp_error))


def teardown():
    '''Clean up after the validation run.'''

    log.debug('teardown: called')

    # remove the remote directory and the stdout capture file
    shutil.rmtree(Remote_Data_Directory, ignore_errors=True)
    try:
        os.remove(RUNMODEL_STDOUT)
    except OSError:
        pass


################################################################################
# Mainline - run the simulation, check the output.
################################################################################

# set logging levels
log.console_logging_level = log.INFO
log.log_logging_level = log.DEBUG

log.debug("Machine we are running on is '%s'" % get_host_name())
setup()

# prepare the user for what is about to happen
log.critical('''
Please note that this validation test is accurate only on 64-bit Linux or
Windows.  Running the validation on a 32-bit operating system will result in
small differences in the generated mesh which defeat the simplistic test for
equality between the generated and expected SWW files.

This validation requires a working internet connection to refresh its files.
You may still run this validation without an internet connection if you have
the required files.

If you are behind a proxy server you will need to supply your proxy details,
such as the proxy server address and your proxy username and password.  These
can be defined in one or more of the environment variables:
    HTTP_PROXY
    PROXY_USERNAME
    PROXY_PASSWORD
If they are not supplied in environment variables you will be prompted for
the information.
''')


# make sure the local data is up to date
all_objects = Mandatory_Data_Objects + Optional_Data_Objects
if not refresh_local_data(all_objects, Local_Data_Directory, MIRRORS):
    if not can_we_run():
        log.critical("Can't refresh via the internet and you don't have the "
                     "required files.")
        log.critical('Terminating the validation.')
        log.critical('')
        log.critical('If you get the missing files from %s' % DATA_FILES_URL)
        log.critical('then you can try to run the validation again.  Put the '
                     'files into the directory')
        log.critical("%s." % Local_Data_Directory)
        sys.exit(10)

# now untar the mandatory objects
for obj in Mandatory_Data_Objects:
    tar_path = os.path.join(Local_Data_Directory, obj)
    log.info('Untarring %s in directory %s ...'
             % (tar_path, Local_Data_Directory))
    untar_file(tar_path, target_dir=Local_Data_Directory)

# set the required environment variables
set_environment()

# now run what simulations we can and check the output is as expected
for odo in Optional_Data_Objects:
    start_time = time.time()

    (_, vtype, _) = odo.rsplit('.', 2)
    vtype = vtype.lower()
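    # e.g. 'patong.sww.TRIAL.tgz'.rsplit('.', 2) gives
    # ['patong.sww', 'TRIAL', 'tgz'], so vtype becomes 'trial'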
    log.critical('#' * 72)
    log.critical("Running Patong '%s' validation ..." % vtype)
    if run_simulation(vtype, odo):
        # get the expected and valid SWW names, check they are 'equal'
        (valid_sww, _) = odo.rsplit('.', 1)
        (expected_sww, _) = valid_sww.rsplit('.', 1)
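        # e.g. odo 'patong.sww.TRIAL.tgz' gives valid_sww='patong.sww.TRIAL'
        # and expected_sww='patong.sww'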
        check_that_output_is_as_expected(expected_sww, valid_sww)

    stop_time = time.time()
    log.critical("'%s' validation took %.1fs\n\n\n" % (vtype, stop_time - start_time))

# clean up
log.critical('Tearing down ...')
teardown()