source: anuga_validation/automated_validation_tests/patong_beach_validation/validate.py @ 7567

Last change on this file since 7567 was 7567, checked in by ole, 14 years ago

Got rid of environment variables and move log file to output dir.
The logging module will need some work, though.

  • Property svn:executable set to *
File size: 15.2 KB
Line 
1'''
2Automatic verification that the ANUGA code runs the Patong simulation
3and produces the expected output.
4
5Required files are downloaded from the ANUGA servers if they are
6out of date or missing.
7'''
8
9import sys
10import os
11import glob
12import unittest
13import time
14import shutil
15
16from anuga.utilities.system_tools \
17     import get_web_file, untar_file, file_length, get_host_name
18import anuga.utilities.log as log
19
# all log output goes to this file (in addition to the console)
log.log_filename = './validation.log'

# sourceforge download mirror hosts (must end with '/')
# try these in turn for each file
## NOTE: It would be more reliable if we could somehow 'poll' Sourceforge
##       for a list of mirrors instead of hard-coding a list here.  The only
##       way to do this at the moment is to 'screen-scrape' the data at
##       http://apps.sourceforge.net/trac/sourceforge/wiki/Mirrors
##       but that only gets the main web page for the entity, not the
##       Sourceforge download mirror server.
MIRRORS = ['http://transact.dl.sourceforge.net/sourceforge/anuga/',       # au
           'http://voxel.dl.sourceforge.net/sourceforge/anuga/',          # us
           'http://superb-west.dl.sourceforge.net/sourceforge/anuga/',    # us
           'http://jaist.dl.sourceforge.net/sourceforge/anuga/',          # jp
           'http://dfn.dl.sourceforge.net/sourceforge/anuga/'             # de
          ]

### for testing
##MIRRORS = ['http://10.7.64.243/patong_validation_data/']       # local linux box

# URL to hand-get data files, if required
DATA_FILES_URL = ('http://sourceforge.net/project/showfiles.php?'
                  'group_id=172848&package_id=319323&release_id=677531')

# sequence of mandatory local data objects
Mandatory_Data_Objects = ('data.tgz',)

# sequence of optional local data objects.
# these names must be of the form <scene>.sww.<type>.tgz
# as code below depends upon it.
Optional_Data_Objects = (
                         'patong.sww.TRIAL.tgz',
                         'patong.sww.BASIC.tgz',
                         'patong.sww.FINAL.tgz'
                        )

# path to the local data directory
Local_Data_Directory = 'local_data'

# path to the remote data directory (scratch area for freshly downloaded digests)
Remote_Data_Directory = 'remote_data'

# name of stdout catch file for runmodel.py
RUNMODEL_STDOUT = 'runmodel.stdout'

# text at start of 'output dir' line in RUNMODEL_STDOUT file
# (used to discover where the simulation wrote its results)
OUTDIR_PREFIX = 'Make directory '

# Name of SWW file produced by run_model.py
OUTPUT_SWW = 'patong.sww'
70
71
def setup():
    '''Perform any preparation needed before the validation run.

    Currently a placeholder that does nothing.
    '''

    pass
76
77
def refresh_local_data(data_objects, target_dir, mirrors):
    '''Update local data objects from the server.

    data_objects:   list of files to refresh
    target_dir:     directory in which to put files
    mirrors:        list of mirror sites to use

    Each file has an associated *.digest file used to decide
    if the local file needs refreshing.

    Return True if all went well, else False.

    NOTE(review): 'target_dir' is never used; files are written to the
    module-level Local_Data_Directory instead -- confirm this is intended.
    '''

    # decision function to decide if a file contains HTML
    def is_html(filename):
        '''Decide if given file contains HTML.

        A mirror that cannot serve the requested file often returns an
        HTML error page instead; look for a DOCTYPE marker in the first
        1KB to detect that case.
        '''

        # 'with' guarantees the handle is closed even if read() raises
        with open(filename) as fd:
            data = fd.read(1024)

        return 'DOCTYPE' in data

    # local function to get remote file from one of mirrors
    def get_remote_from_mirrors(remote, local, auth, mirrors):
        '''Get 'remote' from one of 'mirrors', put in 'local'.

        Return (success, auth) where 'auth' is the (possibly updated)
        authentication tuple to reuse on the next request.
        '''

        # Get a unique date+time string to defeat caching.  The idea is to add
        # this to the end of any URL so proxy sees a different request.
        cache_defeat = '?' + time.strftime('%Y%m%d%H%M%S')

        # try each mirror in turn until one delivers a non-HTML file
        for mirror in mirrors:
            log.debug('Fetching remote file %s from mirror %s'
                      % (remote, mirror))

            remote_url = mirror + remote + cache_defeat
            (result, auth) = get_web_file(remote_url, local, auth=auth)
            # an HTML result is a mirror error page, not the data file
            if result and not is_html(local):
                log.debug('Success fetching file %s' % remote)
                return (True, auth)
            log.debug('Failure fetching from %s' % mirror)
            # don't reuse credentials that just failed
            auth = None

        log.debug('Failure fetching file %s' % remote)
        return (False, auth)

    # local function to compare contents of two files
    def files_same(file_a, file_b):
        '''Return True if both files have identical contents.'''

        with open(file_a, 'r') as fd:
            data_a = fd.read()

        with open(file_b, 'r') as fd:
            data_b = fd.read()

        return data_a == data_b

    # local function to update one data object
    def refresh_object(obj, auth, mirrors):
        '''Update object 'obj' using authentication tuple 'auth'.

        Return (True, <updated_auth>) if all went well,
        else (False, <updated_auth>).
        '''

        # create local and remote file paths.
        obj_digest = obj + '.digest'

        remote_file = os.path.join(Remote_Data_Directory, obj)
        remote_digest = remote_file + '.digest'

        local_file = os.path.join(Local_Data_Directory, obj)
        local_digest = local_file + '.digest'

        # see if missing either digest or object .tgz
        if not os.path.exists(local_digest) or not os.path.exists(local_file):
            # no digest or no object, download both digest and object
            (res, auth) = get_remote_from_mirrors(obj_digest, local_digest,
                                                  auth, mirrors)
            if res:
                (res, auth) = get_remote_from_mirrors(obj, local_file,
                                                      auth, mirrors)
        else:
            # download object digest to remote data directory
            (res, auth) = get_remote_from_mirrors(obj_digest, remote_digest,
                                                  auth, mirrors)
            if res:
                if not files_same(local_digest, remote_digest):
                    # digests differ, refresh object
                    shutil.move(remote_digest, local_digest)
                    (res, auth) = get_remote_from_mirrors(obj, local_file,
                                                          auth, mirrors)

        return (res, auth)

    # create local data directory if required
    log.debug('Creating local directory: %s' % Local_Data_Directory)
    if not os.path.exists(Local_Data_Directory):
        os.mkdir(Local_Data_Directory)

    # clean out remote data copy directory
    log.debug('Cleaning remote directory: %s' % Remote_Data_Directory)
    shutil.rmtree(Remote_Data_Directory, ignore_errors=True)
    os.mkdir(Remote_Data_Directory)

    # refresh each data object in turn; remember any failure but keep going
    auth = None
    result = True
    for data_object in data_objects:
        log.info("Refreshing file '%s'" % data_object)
        log.debug('refresh_local_data: getting %s from mirrors, auth=%s'
                  % (data_object, str(auth)))
        (res, auth) = refresh_object(data_object, auth, mirrors)
        log.debug('refresh_local_data: returned (res,auth)=%s,%s'
                  % (str(res), str(auth)))
        if not res:
            log.info('Refresh of file %s failed.' % data_object)
            result = False
            # don't use possibly bad 'auth' again,
            # some proxies lock out on repeated failures.
            auth = None

    if result:
        log.critical('Local data has been refreshed.')
    else:
        log.critical('Local data has been refreshed, with one or more errors.')
    log.critical()
    return result
212
213
def can_we_run():
    '''Decide if we can run with the files we have.

    Return True if we *can* run, else False.

    Tell user what is happening first, then untar files.
    '''

    log.critical('Checking if you have the required files to run:')

    # width of the longest object name, for column-aligned output
    all_names = Mandatory_Data_Objects + Optional_Data_Objects
    max_width = max([0] + [len(name) for name in all_names])

    # every mandatory object must exist locally, else we can't run
    have_mandatory_files = True
    for name in Mandatory_Data_Objects:
        if os.path.exists(os.path.join(Local_Data_Directory, name)):
            log.info('\t%s  found' % name.ljust(max_width))
        else:
            log.info('\t%s  MISSING AND REQUIRED!' % name.ljust(max_width))
            have_mandatory_files = False

    # at least one optional object must exist
    have_optional_files = False
    for name in Optional_Data_Objects:
        if os.path.exists(os.path.join(Local_Data_Directory, name)):
            have_optional_files = True
            log.info('\t%s  found' % name.ljust(max_width))
        else:
            log.info('\t%s  MISSING!' % name.ljust(max_width))

    if not have_mandatory_files or not have_optional_files:
        log.critical('You must obtain the missing files before you can run '
                     'this validation.')
        return False

    log.critical('You have enough required files to run.')
    log.critical()

    return True
260
261
def set_environment():
    '''Point the ANUGA environment variables at the local data.'''

    # INUNDATIONHOME must end with a path separator, hence the '' join
    os.environ['INUNDATIONHOME'] = os.path.join(Local_Data_Directory, '')
    os.environ['MUXHOME'] = os.path.join(Local_Data_Directory, 'data')
268
269
def run_simulation(vtype, sim_obj):
    '''Run a simulation.

    vtype:    validation type string used to instantiate project.py
              (substituted, lower-cased, for the '#!SETUP!#' marker)
    sim_obj:  name of the data tarfile for this simulation

    Returns True if all went well, else False.
    '''

    # untar the object
    tar_path = os.path.join(Local_Data_Directory, sim_obj)
    log.info('Untarring %s in directory %s ...'
             % (tar_path, Local_Data_Directory))
    untar_file(tar_path, target_dir=Local_Data_Directory)

    # modify project.py template
    log.debug("Creating '%s' version of project.py" % vtype)
    with open('project_template.py', 'r') as fd:
        project_lines = fd.readlines()

    new_project = []
    for line in project_lines:
        new_project.append(line.replace('#!SETUP!#', vtype.lower()))

    with open('project.py', 'w') as fd:
        fd.write(''.join(new_project))

    # import new project.py
    import project

    # run the simulation, produce SWW file
    log.info('Running the simulation ...')
    cmd = 'python run_model.py > %s' % RUNMODEL_STDOUT
    log.debug("run_simulation: doing '%s'" % cmd)
    res = os.system(cmd)
    log.debug("run_simulation: res=%d" % res)

    # 'unimport' project.py so a later call re-reads the rewritten file.
    # Deleting the local name alone is not enough: Python caches modules
    # in sys.modules, so a subsequent 'import project' would silently
    # reuse the stale module from the previous vtype.
    del project
    sys.modules.pop('project', None)

    # check result
    if res != 0:
        log.critical('Simulation failed, check log')

    return res == 0
314
315def check_that_output_is_as_expected(expected_sww, valid_sww):
316    '''Check that validation output is as required.'''
317
318    # get path to expected SWW file
319    log.critical('Checking that simulation results are as expected ...')
320    local_sww = os.path.join(Local_Data_Directory, valid_sww)
321
322    # get output directory from stdout capture file
323    try:
324        fd = open(RUNMODEL_STDOUT, 'r')
325    except IOError, e:
326        log.critical("Can't open catch file '%s': %s"
327                     % (RUNMODEL_STDOUT, str(e)))
328        return 1
329    lines = fd.readlines()
330    fd.close
331
332    output_directory = None
333    for line in lines:
334        if line.startswith(OUTDIR_PREFIX):
335            output_directory = line.replace(OUTDIR_PREFIX, '', 1)
336            output_directory = output_directory.strip('\n')
337            break
338    if output_directory is None:
339        log.critical("Couldn't find line starting with '%s' in file '%s'"
340                     % (OUTDIR_PREFIX, RUNMODEL_STDOUT))
341        return 1
342
343    log.debug('check_that_output_is_as_expected: output_directory=%s'
344              % output_directory)
345   
346    # compare SWW files here and there
347    new_output_sww = os.path.join(output_directory, expected_sww)
348    cmd = 'python cmpsww.py %s %s > cmpsww.stdout' % (local_sww, new_output_sww)
349    log.debug("check_that_output_is_as_expected: doing '%s'" % cmd)
350    res = os.system(cmd)
351    log.debug("check_that_output_is_as_expected: res=%d" % res)
352    log.critical()
353    if res == 0:
354        log.info('Simulation results are as expected.')
355    else:
356        log.critical('Simulation results are NOT as expected.')
357        fd = open('cmpsww.stdout', 'r')
358        cmp_error = fd.readlines()
359        fd.close()
360        log.critical(''.join(cmp_error))
361
362
def teardown():
    '''Clean up after validation run.

    Removes the remote scratch directory and the run_model.py stdout
    capture file.  Both removals are best-effort; missing files are OK.
    '''

    log.debug('teardown: called')

    # remove remote directory and stdout capture file
    shutil.rmtree(Remote_Data_Directory, ignore_errors=True)
    try:
        os.remove(RUNMODEL_STDOUT)
    except OSError:
        # capture file may not exist (e.g. no simulation was run)
        pass
374           
375
376################################################################################
377# Mainline - run the simulation, check output.
378################################################################################
379
# set logging levels: terse on the console, full detail in the log file
log.console_logging_level = log.INFO
log.log_logging_level = log.DEBUG

log.debug("Machine we are running on is '%s'" % get_host_name())
setup()

# prepare user for what is about to happen
log.critical('''
Please note that this validation test is accurate only on 64bit Linux or
Windows.  Running the validation on a 32bit operating system will result in
small differences in the generated mesh which defeats the simplistic test for
equality between the generated and expected SWW files.

This validation requires a working internet connection to refresh its files.
You may still run this validation without an internet connection if you have the
required files.

If you are behind a proxy server you will need to supply your proxy details
such as the proxy server address and your proxy username and password.  These
can be defined in one or more of the environment variables:
    HTTP_PROXY
    PROXY_USERNAME
    PROXY_PASSWORD
if you wish.  If not supplied in environment variables you will be prompted for
the information.
''')


# make sure local data is up to date; if the refresh fails we may still be
# able to run from previously downloaded files
all_objects = Mandatory_Data_Objects + Optional_Data_Objects
if not refresh_local_data(all_objects, Local_Data_Directory, MIRRORS):
    if not can_we_run():
        log.critical("Can't refresh via the internet and you don't have the "
                     "required files.")
        log.critical('Terminating the validation.')
        log.critical('')
        log.critical('If you get the missing files from %s' % DATA_FILES_URL)
        log.critical('then you can try to run the validation again.  Put the '
                     'files into the directory')
        log.critical("%s." % Local_Data_Directory)
        sys.exit(10)

# now untar mandatory objects
for obj in Mandatory_Data_Objects:
    tar_path = os.path.join(Local_Data_Directory, obj)
    log.info('Untarring %s in directory %s ...'
             % (tar_path, Local_Data_Directory))
    untar_file(tar_path, target_dir=Local_Data_Directory)

# set required environment variables
set_environment()

# now run what simulations we can and check output is as expected
for odo in Optional_Data_Objects:
    start_time = time.time()

    # object names have the form <scene>.sww.<type>.tgz,
    # so rsplit('.', 2) extracts the <type> component
    (_, vtype, _) = odo.rsplit('.', 2)
    vtype = vtype.lower()
    log.critical('#' * 72)
    log.critical("Running Patong '%s' validation ..." % vtype)
    if run_simulation(vtype, odo):
        # get SWW names expected and valid, check 'equal'
        (valid_sww, _) = odo.rsplit('.', 1)        # <scene>.sww.<type>
        (expected_sww, _) = valid_sww.rsplit('.', 1)   # <scene>.sww
        check_that_output_is_as_expected(expected_sww, valid_sww)

    stop_time = time.time()
    log.critical("'%s' validation took %.1fs\n\n\n" % (vtype, stop_time - start_time))

# clean up
log.critical('Tearing down ...')
teardown()
Note: See TracBrowser for help on using the repository browser.