source: anuga_validation/automated_validation_tests/patong_beach_validation/validate.py @ 7653

Last change on this file since 7653 was 7653, checked in by ole, 14 years ago

Started a proper unit test framework for Patong

'''
Automatic verification that the ANUGA code runs the Patong simulation
and produces the expected output.

Required files are downloaded from the ANUGA servers if they are
out of date or missing.
'''

import sys
import os
import glob
import unittest
import time
import shutil

from anuga.utilities.system_tools \
     import get_web_file, untar_file, file_length, get_host_name
import anuga.utilities.log as log

log.log_filename = './validation.log'

# sourceforge download mirror hosts (must end with '/')
# try these in turn for each file
## NOTE: It would be more reliable if we could somehow 'poll' Sourceforge
##       for a list of mirrors instead of hard-coding a list here.  The only
##       way to do this at the moment is to 'screen-scrape' the data at
##       http://apps.sourceforge.net/trac/sourceforge/wiki/Mirrors
##       but that only gets the main web page for the entity, not the
##       Sourceforge download mirror server.
MIRRORS = ['http://transact.dl.sourceforge.net/sourceforge/anuga/',       # au
           'http://voxel.dl.sourceforge.net/sourceforge/anuga/',          # us
           'http://superb-west.dl.sourceforge.net/sourceforge/anuga/',    # us
           'http://jaist.dl.sourceforge.net/sourceforge/anuga/',          # jp
           'http://dfn.dl.sourceforge.net/sourceforge/anuga/'             # de
          ]

### for testing
##MIRRORS = ['http://10.7.64.243/patong_validation_data/']       # local linux box

# URL from which to download the data files by hand, if required
DATA_FILES_URL = ('http://sourceforge.net/project/showfiles.php?'
                  'group_id=172848&package_id=319323&release_id=677531')

# sequence of mandatory local data objects
Mandatory_Data_Objects = ('data.tgz',)

# sequence of optional local data objects.
# these names must be of the form <scene>.sww.<type>.tgz,
# as the code below depends on it (see the example following the tuple).
Optional_Data_Objects = (
                         'patong.sww.TRIAL.tgz',
                         'patong.sww.BASIC.tgz',
                         'patong.sww.FINAL.tgz'
                        )
#Optional_Data_Objects = ('patong.sww.TRIAL.tgz',)
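# For example, the mainline below parses 'patong.sww.TRIAL.tgz' with
#     'patong.sww.TRIAL.tgz'.rsplit('.', 2) -> ['patong.sww', 'TRIAL', 'tgz']
# giving validation type 'TRIAL' and reference SWW name 'patong.sww.TRIAL'.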

# Associated tolerances to be used in comparisons (these depend on discretisation errors)
epsilon = {'patong.sww.TRIAL.tgz': 1.0e-3,
           'patong.sww.BASIC.tgz': 1.0e-4,
           'patong.sww.FINAL.tgz': 1.0e-5}


# path to the local data directory
Local_Data_Directory = 'local_data'

# path to the remote data directory
Remote_Data_Directory = 'remote_data'

# name of stdout catch file for run_model.py
RUNMODEL_STDOUT = 'runmodel.stdout'

# text at start of 'output dir' line in RUNMODEL_STDOUT file
OUTDIR_PREFIX = 'Output directory: '
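# A matching line in the stdout catch file would look like this
# (hypothetical output path, for illustration only):
#     Output directory: ./local_data/outputs/patong_run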

# Name of SWW file produced by run_model.py
OUTPUT_SWW = 'patong.sww'


def setup():
    '''Prepare for the validation run.'''

    pass


def refresh_local_data(data_objects, target_dir, mirrors):
    '''Update local data objects from the server.

    data_objects:   list of files to refresh
    target_dir:     directory in which to put files
    mirrors:        list of mirror sites to use

    Each file has an associated *.digest file used to decide
    if the local file needs refreshing.

    Return True if all went well, else False.
    '''

    # local function to decide if a file contains HTML
    def is_html(filename):
        '''Decide if given file contains HTML.'''

        fd = open(filename)
        data = fd.read(1024)
        fd.close()

        if 'DOCTYPE' in data:
            return True

        return False

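    # (A mirror that cannot serve a file typically returns an HTML error
    # page instead of the tarball; its first 1024 bytes will usually contain
    # a 'DOCTYPE' declaration, which is what the check above keys on.)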

    # local function to get remote file from one of mirrors
    def get_remote_from_mirrors(remote, local, auth, mirrors):
        '''Get 'remote' from one of 'mirrors', put in 'local'.'''

        # Get a unique date+time string to defeat caching.  The idea is to add
        # this to the end of any URL so the proxy sees a different request.
        cache_defeat = '?' + time.strftime('%Y%m%d%H%M%S')
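        # e.g. a request for 'data.tgz' goes out as 'data.tgz?20100401123055'
        # (illustrative timestamp), so caches see a brand-new URL each time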

        # try each mirror when getting file
        for mirror in mirrors:
            log.debug('Fetching remote file %s from mirror %s'
                      % (remote, mirror))

            remote_url = mirror + remote + cache_defeat
            (result, auth) = get_web_file(remote_url, local, auth=auth)
            if result and not is_html(local):
                log.debug('Success fetching file %s' % remote)
                return (True, auth)
            log.debug('Failure fetching from %s' % mirror)
            auth = None

        log.debug('Failure fetching file %s' % remote)
        return (False, auth)


    # local function to compare contents of two files
    def files_same(file_a, file_b):
        '''Compare two files to see if contents are the same.'''

        fd = open(file_a, 'r')
        data_a = fd.read()
        fd.close()

        fd = open(file_b, 'r')
        data_b = fd.read()
        fd.close()

        return data_a == data_b


    # local function to update one data object
    def refresh_object(obj, auth, mirrors):
        '''Update object 'obj' using authentication tuple 'auth'.

        Return (True, <updated_auth>) if all went well,
        else (False, <updated_auth>).
        '''

        # create local and remote file paths.
        obj_digest = obj + '.digest'

        remote_file = os.path.join(Remote_Data_Directory, obj)
        remote_digest = remote_file + '.digest'

        local_file = os.path.join(Local_Data_Directory, obj)
        local_digest = local_file + '.digest'

        # see if either the digest or the object .tgz is missing
        if not os.path.exists(local_digest) or not os.path.exists(local_file):
            # no digest or no object, download both digest and object
            (res, auth) = get_remote_from_mirrors(obj_digest, local_digest, auth, mirrors)
            if res:
                (res, auth) = get_remote_from_mirrors(obj, local_file, auth, mirrors)
        else:
            # download object digest to remote data directory
            (res, auth) = get_remote_from_mirrors(obj_digest, remote_digest, auth, mirrors)
            if res:
                if not files_same(local_digest, remote_digest):
                    # digests differ, refresh object
                    shutil.move(remote_digest, local_digest)
                    (res, auth) = get_remote_from_mirrors(obj, local_file, auth, mirrors)

        return (res, auth)

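    # Note: each *.digest file is assumed to hold a checksum of its data
    # object, e.g. something like
    #     hashlib.md5(open('data.tgz', 'rb').read()).hexdigest()
    # generated server-side.  Only the small digest files are compared, so a
    # large tarball is re-downloaded only when its digest has changed.
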
    # create local data directory if required
    log.debug('Creating local directory: %s' % Local_Data_Directory)
    if not os.path.exists(Local_Data_Directory):
        os.mkdir(Local_Data_Directory)

    # clean out remote data copy directory
    log.debug('Cleaning remote directory: %s' % Remote_Data_Directory)
    shutil.rmtree(Remote_Data_Directory, ignore_errors=True)
    os.mkdir(Remote_Data_Directory)

    # refresh the local files, tracking overall success
    auth = None
    result = True
    for data_object in data_objects:
        log.info("Refreshing file '%s'" % data_object)
        log.debug('refresh_local_data: getting %s from mirrors, auth=%s'
                  % (data_object, str(auth)))
        (res, auth) = refresh_object(data_object, auth, mirrors)
        log.debug('refresh_local_data: returned (res,auth)=%s,%s'
                  % (str(res), str(auth)))
        if not res:
            log.info('Refresh of file %s failed.' % data_object)
            result = False
            # don't use a possibly bad 'auth' again,
            # some proxies lock out on repeated failures.
            auth = None

    if result:
        log.critical('Local data has been refreshed.')
    else:
        log.critical('Local data has been refreshed, with one or more errors.')
    log.critical()
    return result


def can_we_run():
    '''Decide if we can run with the files we have.

    Return True if we *can* run, else False.

    Tell the user which files are present and which are missing.
    '''

    log.critical('Checking if you have the required files to run:')

    # get max width of object name string
    max_width = 0
    for obj in Mandatory_Data_Objects:
        max_width = max(len(obj), max_width)
    for obj in Optional_Data_Objects:
        max_width = max(len(obj), max_width)

    # if we don't have *all* mandatory objects, we can't run
    have_mandatory_files = True
    for obj in Mandatory_Data_Objects:
        obj_path = os.path.join(Local_Data_Directory, obj)
        if os.path.exists(obj_path):
            log.info('\t%s  found' % obj.ljust(max_width))
        else:
            log.info('\t%s  MISSING AND REQUIRED!' % obj.ljust(max_width))
            have_mandatory_files = False

    # at least *one* of the optional objects must exist
    have_optional_files = False
    for obj in Optional_Data_Objects:
        obj_path = os.path.join(Local_Data_Directory, obj)
        if os.path.exists(obj_path):
            have_optional_files = True
            log.info('\t%s  found' % obj.ljust(max_width))
        else:
            log.info('\t%s  MISSING!' % obj.ljust(max_width))

    if not have_mandatory_files or not have_optional_files:
        log.critical('You must obtain the missing files before you can run '
                     'this validation.')
        return False

    log.critical('You have enough of the required files to run.')
    log.critical()

    return True


def set_environment():
    # modify environment so we use the local data
    new_inundationhome = os.path.join(Local_Data_Directory, '')
    os.environ['INUNDATIONHOME'] = new_inundationhome
    new_muxhome = os.path.join(Local_Data_Directory, 'data')
    os.environ['MUXHOME'] = new_muxhome
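    # With the defaults above this gives (the trailing separator comes
    # from joining with an empty string):
    #     INUNDATIONHOME = 'local_data/'      (or 'local_data\' on Windows)
    #     MUXHOME        = 'local_data/data'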


def run_simulation(vtype, sim_obj):
    '''Run a simulation.

    Returns True if all went well, else False.
    '''

    # untar the object
    tar_path = os.path.join(Local_Data_Directory, sim_obj)
    log.info('Untarring %s in directory %s ...'
             % (tar_path, Local_Data_Directory))
    untar_file(tar_path, target_dir=Local_Data_Directory)

    # modify project.py template
    log.debug("Creating '%s' version of project.py" % vtype)
    fd = open('project_template.py', 'r')
    project = fd.readlines()
    fd.close()

    new_project = []
    for line in project:
        new_project.append(line.replace('#!SETUP!#', vtype.lower()))
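    # For example, a (hypothetical) template line such as
    #     setup = '#!SETUP!#'
    # is written out as
    #     setup = 'trial'
    # when building the TRIAL version of project.py.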

    fd = open('project.py', 'w')
    fd.write(''.join(new_project))
    fd.close()

    # import new project.py
    import project

    # run the simulation, produce SWW file
    log.info('Running the simulation ...')
    cmd = 'python run_model.py > %s' % RUNMODEL_STDOUT
    log.debug("run_simulation: doing '%s'" % cmd)
    res = os.system(cmd)
    log.debug("run_simulation: res=%d" % res)

    # 'unimport' project.py
    del project

    # check result
    if res != 0:
        log.critical('Simulation failed, check log')

    return res == 0

def check_that_output_is_as_expected(expected_sww, valid_sww, epsilon):
    '''Check that validation output is as required.'''

    # get path to expected SWW file
    log.critical('Checking that simulation results are as expected ...')
    local_sww = os.path.join(Local_Data_Directory, valid_sww)

    # get output directory from stdout capture file
    try:
        fd = open(RUNMODEL_STDOUT, 'r')
    except IOError, e:
        log.critical("Can't open catch file '%s': %s"
                     % (RUNMODEL_STDOUT, str(e)))
        return 1
    lines = fd.readlines()
    fd.close()

    output_directory = None
    for line in lines:
        if line.startswith(OUTDIR_PREFIX):
            output_directory = line.replace(OUTDIR_PREFIX, '', 1)
            output_directory = output_directory.strip()
            break
    if output_directory is None:
        log.critical("Couldn't find line starting with '%s' in file '%s'"
                     % (OUTDIR_PREFIX, RUNMODEL_STDOUT))
        return 1

    log.debug('check_that_output_is_as_expected: output_directory=%s'
              % output_directory)

    # compare SWW files here and there
    new_output_sww = os.path.join(output_directory, expected_sww)
    #cmd = 'python cmpsww.py %s %s > cmpsww.stdout' % (local_sww, new_output_sww)
    cmd = 'python compare_model_timeseries.py %s %s %e > compare_model_timeseries.stdout' %\
          (local_sww, new_output_sww, epsilon)
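    # For the TRIAL object the generated command looks something like this,
    # where <output_directory> is whatever run_model.py reported:
    #     python compare_model_timeseries.py local_data/patong.sww.TRIAL \
    #         <output_directory>/patong.sww 1.000000e-03 \
    #         > compare_model_timeseries.stdout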
    print '-------------------------------------'
    print cmd
    print '-------------------------------------'

    log.debug("check_that_output_is_as_expected: doing '%s'" % cmd)
    res = os.system(cmd)
    log.debug("check_that_output_is_as_expected: res=%d" % res)
    log.critical()
    print 'Result', res
    if res == 0:
        log.info('Simulation results are as expected.')
    else:
        log.critical('Simulation results are NOT as expected.')
        fd = open('compare_model_timeseries.stdout', 'r')
        cmp_error = fd.readlines()
        fd.close()
        log.critical(''.join(cmp_error))


def teardown():
    '''Clean up after validation run.'''

    log.debug('teardown: called')

    # remove remote directory and stdout capture file
    #shutil.rmtree(Remote_Data_Directory, ignore_errors=True)
    #try:
    #    os.remove(RUNMODEL_STDOUT)
    #except OSError:
    #    pass


################################################################################
# Mainline - run the simulation, check output.
################################################################################

# set logging levels
log.console_logging_level = log.INFO
log.log_logging_level = log.DEBUG

log.debug('Machine we are running on is "%s"' % get_host_name())
setup()

# prepare user for what is about to happen
log.critical('''
This validation requires a working internet connection to refresh its files.
You may still run this validation without an internet connection if you have the
required files.

If you are behind a proxy server you will need to supply your proxy details
such as the proxy server address and your proxy username and password.  These
can be defined in one or more of the environment variables:
    HTTP_PROXY
    PROXY_USERNAME
    PROXY_PASSWORD
if you wish.  If not supplied in environment variables you will be prompted for
the information.
''')


# make sure local data is up to date
all_objects = Mandatory_Data_Objects + Optional_Data_Objects
if not refresh_local_data(all_objects, Local_Data_Directory, MIRRORS):
    if not can_we_run():
        log.critical("Can't refresh via the internet and you don't have the "
                     "required files.")
        log.critical('Terminating the validation.')
        log.critical('')
        log.critical('If you get the missing files from %s' % DATA_FILES_URL)
        log.critical('then you can try to run the validation again.  Put the '
                     'files into the directory')
        log.critical("%s." % Local_Data_Directory)
        sys.exit(10)

# now untar mandatory objects
for obj in Mandatory_Data_Objects:
    tar_path = os.path.join(Local_Data_Directory, obj)
    log.info('Untarring %s in directory %s ...'
             % (tar_path, Local_Data_Directory))
    untar_file(tar_path, target_dir=Local_Data_Directory)

# set required environment variables
set_environment()

# now run what simulations we can and check output is as expected
for odo in Optional_Data_Objects:
    start_time = time.time()

    _, vtype, _ = odo.rsplit('.', 2)
    vtype = vtype.lower()
    log.critical('#' * 72)
    log.critical('Running Patong "%s" validation ...' % vtype)
    if run_simulation(vtype, odo):
        # get SWW names expected and valid, check 'equal'
        valid_sww, _ = odo.rsplit('.', 1)
        expected_sww, _ = valid_sww.rsplit('.', 1)
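        # e.g. for odo = 'patong.sww.TRIAL.tgz':
        #     valid_sww    = 'patong.sww.TRIAL'  (reference SWW untarred above)
        #     expected_sww = 'patong.sww'        (SWW produced by the model run)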
        check_that_output_is_as_expected(expected_sww, valid_sww, epsilon[odo])

    stop_time = time.time()
    log.critical('"%s" validation took %.1fs\n\n\n' % (vtype, stop_time - start_time))

# clean up
log.critical('Tearing down ...')
teardown()