source: anuga_validation/automated_validation_tests/patong_beach_validation/validate.py @ 7641

Last change on this file since 7641 was 7641, checked in by ole, 14 years ago

Repaired patong validate reading output dir. I still think this is hacky.

  • Property svn:executable set to *
File size: 15.4 KB
'''
Automatic verification that the ANUGA code runs the Patong simulation
and produces the expected output.

Required files are downloaded from the ANUGA servers if they are
out of date or missing.
'''

import sys
import os
import glob
import unittest
import time
import shutil

from anuga.utilities.system_tools \
     import get_web_file, untar_file, file_length, get_host_name
import anuga.utilities.log as log

log.log_filename = './validation.log'

# sourceforge download mirror hosts (must end with '/')
# try these in turn for each file
## NOTE: It would be more reliable if we could somehow 'poll' Sourceforge
##       for a list of mirrors instead of hard-coding a list here.  The only
##       way to do this at the moment is to 'screen-scrape' the data at
##       http://apps.sourceforge.net/trac/sourceforge/wiki/Mirrors
##       but that only gets the main web page for the entity, not the
##       Sourceforge download mirror server.
MIRRORS = ['http://transact.dl.sourceforge.net/sourceforge/anuga/',       # au
           'http://voxel.dl.sourceforge.net/sourceforge/anuga/',          # us
           'http://superb-west.dl.sourceforge.net/sourceforge/anuga/',    # us
           'http://jaist.dl.sourceforge.net/sourceforge/anuga/',          # jp
           'http://dfn.dl.sourceforge.net/sourceforge/anuga/'             # de
          ]

### for testing
##MIRRORS = ['http://10.7.64.243/patong_validation_data/']       # local linux box

# URL to hand-get data files, if required
DATA_FILES_URL = ('http://sourceforge.net/project/showfiles.php?'
                  'group_id=172848&package_id=319323&release_id=677531')

# sequence of mandatory local data objects
Mandatory_Data_Objects = ('data.tgz',)

# sequence of optional local data objects.
# these names must be of the form <scene>.sww.<type>.tgz
# as the code below depends on this.
#Optional_Data_Objects = (
#                         'patong.sww.TRIAL.tgz',
#                         'patong.sww.BASIC.tgz',
#                         'patong.sww.FINAL.tgz'
#                        )
Optional_Data_Objects = ('patong.sww.TRIAL.tgz',)
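# e.g. 'patong.sww.TRIAL.tgz' has scene 'patong' and type 'TRIAL'; the
# mainline code below relies on this layout to derive the validation type
# and the expected SWW filenames.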

# path to the local data directory
Local_Data_Directory = 'local_data'

# path to the remote data directory
Remote_Data_Directory = 'remote_data'

# name of stdout catch file for run_model.py
RUNMODEL_STDOUT = 'runmodel.stdout'

# text at start of 'output dir' line in RUNMODEL_STDOUT file
OUTDIR_PREFIX = 'Output directory: '

# Name of SWW file produced by run_model.py
OUTPUT_SWW = 'patong.sww'


def setup():
    '''Prepare for the validation run.'''

    pass


def refresh_local_data(data_objects, target_dir, mirrors):
    '''Update local data objects from the server.

    data_objects:   list of files to refresh
    target_dir:     directory in which to put files
    mirrors:        list of mirror sites to use

    Each file has an associated *.digest file used to decide
    if the local file needs refreshing.

    Return True if all went well, else False.
    '''

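    # For example, the object 'data.tgz' is paired with 'data.tgz.digest';
    # an object is re-fetched only when a freshly downloaded digest differs
    # from the locally cached one (see refresh_object() below).
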
    # decision function to decide if a file contains HTML
    def is_html(filename):
        '''Decide if given file contains HTML.'''

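        # A mirror or proxy may answer a download request with an HTML
        # error page rather than the requested file, so check the first
        # 1KB for a DOCTYPE declaration (a cheap heuristic, not a parse).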
        fd = open(filename)
        data = fd.read(1024)
        fd.close()

        return 'DOCTYPE' in data


    # local function to get remote file from one of mirrors
    def get_remote_from_mirrors(remote, local, auth, mirrors):
        '''Get 'remote' from one of 'mirrors', put in 'local'.'''

        # Get a unique date+time string to defeat caching.  The idea is to add
        # this to the end of any URL so the proxy sees a different request.
        cache_defeat = '?' + time.strftime('%Y%m%d%H%M%S')
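        # e.g. (illustrative) a request for 'data.tgz.digest' becomes
        # 'http://<mirror>/data.tgz.digest?20100401123456'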

        # try each mirror when getting file
        for mirror in mirrors:
            log.debug('Fetching remote file %s from mirror %s'
                      % (remote, mirror))

            remote_url = mirror + remote + cache_defeat
            (result, auth) = get_web_file(remote_url, local, auth=auth)
            if result and not is_html(local):
                log.debug('Success fetching file %s' % remote)
                return (True, auth)
            log.debug('Failure fetching from %s' % mirror)
            auth = None

        log.debug('Failure fetching file %s' % remote)
        return (False, auth)


    # local function to compare contents of two files
    def files_same(file_a, file_b):
        '''Compare two files to see if contents are the same.'''

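        # NOTE: reads both files fully into memory; acceptable here since
        # this is only ever used on the small *.digest files.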
        fd = open(file_a, 'r')
        data_a = fd.read()
        fd.close()

        fd = open(file_b, 'r')
        data_b = fd.read()
        fd.close()

        return data_a == data_b


    # local function to update one data object
    def refresh_object(obj, auth, mirrors):
        '''Update object 'obj' using authentication tuple 'auth'.

        Return (True, <updated_auth>) if all went well,
        else (False, <updated_auth>).
        '''

        # create local and remote file paths.
        obj_digest = obj + '.digest'

        remote_file = os.path.join(Remote_Data_Directory, obj)
        remote_digest = remote_file + '.digest'

        local_file = os.path.join(Local_Data_Directory, obj)
        local_digest = local_file + '.digest'

        # see if missing either digest or object .tgz
        if not os.path.exists(local_digest) or not os.path.exists(local_file):
            # no digest or no object, download both digest and object
            (res, auth) = get_remote_from_mirrors(obj_digest, local_digest,
                                                  auth, mirrors)
            if res:
                (res, auth) = get_remote_from_mirrors(obj, local_file,
                                                      auth, mirrors)
        else:
            # download object digest to remote data directory
            (res, auth) = get_remote_from_mirrors(obj_digest, remote_digest,
                                                  auth, mirrors)
            if res:
                if not files_same(local_digest, remote_digest):
                    # digests differ, refresh object
                    shutil.move(remote_digest, local_digest)
                    (res, auth) = get_remote_from_mirrors(obj, local_file,
                                                          auth, mirrors)

        return (res, auth)

    # create local data directory if required
    log.debug('Creating local directory: %s' % Local_Data_Directory)
    if not os.path.exists(Local_Data_Directory):
        os.mkdir(Local_Data_Directory)

    # clean out remote data copy directory
    log.debug('Cleaning remote directory: %s' % Remote_Data_Directory)
    shutil.rmtree(Remote_Data_Directory, ignore_errors=True)
    os.mkdir(Remote_Data_Directory)

    # refresh each local data object, noting any failures
    auth = None
    result = True
    for data_object in data_objects:
        log.info("Refreshing file '%s'" % data_object)
        log.debug('refresh_local_data: getting %s from mirrors, auth=%s'
                  % (data_object, str(auth)))
        (res, auth) = refresh_object(data_object, auth, mirrors)
        log.debug('refresh_local_data: returned (res,auth)=%s,%s'
                  % (str(res), str(auth)))
        if not res:
            log.info('Refresh of file %s failed.' % data_object)
            result = False
            # don't use possibly bad 'auth' again,
            # some proxies lock out on repeated failures.
            auth = None

    if result:
        log.critical('Local data has been refreshed.')
    else:
        log.critical('Local data has been refreshed, with one or more errors.')
    log.critical()
    return result


def can_we_run():
    '''Decide if we can run with the files we have.

    Return True if we *can* run, else False.

    Tell the user what was found and what is missing.
    '''

    log.critical('Checking if you have the required files to run:')

    # get max width of object name string
    max_width = 0
    for obj in Mandatory_Data_Objects:
        max_width = max(len(obj), max_width)
    for obj in Optional_Data_Objects:
        max_width = max(len(obj), max_width)

    # if we don't have *all* mandatory objects, we can't run
    have_mandatory_files = True
    for obj in Mandatory_Data_Objects:
        obj_path = os.path.join(Local_Data_Directory, obj)
        if os.path.exists(obj_path):
            log.info('\t%s  found' % obj.ljust(max_width))
        else:
            log.info('\t%s  MISSING AND REQUIRED!' % obj.ljust(max_width))
            have_mandatory_files = False

    # at least *one* of these must exist
    have_optional_files = False
    for obj in Optional_Data_Objects:
        obj_path = os.path.join(Local_Data_Directory, obj)
        if os.path.exists(obj_path):
            have_optional_files = True
            log.info('\t%s  found' % obj.ljust(max_width))
        else:
            log.info('\t%s  MISSING!' % obj.ljust(max_width))

    if not have_mandatory_files or not have_optional_files:
        log.critical('You must obtain the missing files before you can run '
                     'this validation.')
        return False

    log.critical('You have enough required files to run.')
    log.critical()

    return True


def set_environment():
    '''Modify the environment so the simulation uses the local data.'''

    # NOTE: os.path.join(dir, '') just appends a trailing path separator,
    # e.g. 'local_data' becomes 'local_data/' on posix systems
    new_inundationhome = os.path.join(Local_Data_Directory, '')
    os.environ['INUNDATIONHOME'] = new_inundationhome
    new_muxhome = os.path.join(Local_Data_Directory, 'data')
    os.environ['MUXHOME'] = new_muxhome


def run_simulation(vtype, sim_obj):
    '''Run a simulation.

    Returns True if all went well, else False.
    '''

    # untar the object
    tar_path = os.path.join(Local_Data_Directory, sim_obj)
    log.info('Untarring %s in directory %s ...'
             % (tar_path, Local_Data_Directory))
    untar_file(tar_path, target_dir=Local_Data_Directory)

    # modify project.py template
    log.debug("Creating '%s' version of project.py" % vtype)
    fd = open('project_template.py', 'r')
    project = fd.readlines()
    fd.close()

    new_project = []
    for line in project:
        new_project.append(line.replace('#!SETUP!#', vtype.lower()))

    fd = open('project.py', 'w')
    fd.write(''.join(new_project))
    fd.close()
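
    # e.g. (illustrative) a template line  setup = '#!SETUP!#'  would be
    # written out as  setup = 'trial'  when running the TRIAL validation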

    # import new project.py (note: Python caches modules in sys.modules,
    # so a plain 'import' on a later iteration returns the cached copy)
    import project

    # run the simulation, produce SWW file
    log.info('Running the simulation ...')
    cmd = 'python run_model.py > %s' % RUNMODEL_STDOUT
    log.debug("run_simulation: doing '%s'" % cmd)
    res = os.system(cmd)
    log.debug("run_simulation: res=%d" % res)

    # 'unimport' project.py (this only removes our local name;
    # the module itself remains cached in sys.modules)
    del project

    # check result
    if res != 0:
        log.critical('Simulation failed, check log')

    return res == 0


def check_that_output_is_as_expected(expected_sww, valid_sww):
    '''Check that validation output is as required.'''

    # get path to expected SWW file
    log.critical('Checking that simulation results are as expected ...')
    local_sww = os.path.join(Local_Data_Directory, valid_sww)

    # get output directory from stdout capture file
    try:
        fd = open(RUNMODEL_STDOUT, 'r')
    except IOError, e:
        log.critical("Can't open catch file '%s': %s"
                     % (RUNMODEL_STDOUT, str(e)))
        return 1
    lines = fd.readlines()
    fd.close()

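    # find the line announcing the output directory; run_model.py is
    # expected to print a line like (path illustrative only):
    #     Output directory: /some/path/to/output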
    output_directory = None
    for line in lines:
        if line.startswith(OUTDIR_PREFIX):
            output_directory = line.replace(OUTDIR_PREFIX, '', 1)
            output_directory = output_directory.strip()
            break
    if output_directory is None:
        log.critical("Couldn't find line starting with '%s' in file '%s'"
                     % (OUTDIR_PREFIX, RUNMODEL_STDOUT))
        return 1

    log.debug('check_that_output_is_as_expected: output_directory=%s'
              % output_directory)

    # compare SWW files here and there
    new_output_sww = os.path.join(output_directory, expected_sww)
    #cmd = 'python cmpsww.py %s %s > cmpsww.stdout' % (local_sww, new_output_sww)
    cmd = ('python compare_model_timeseries.py %s %s '
           '> compare_model_timeseries.stdout'
           % (local_sww, new_output_sww))
    log.debug("check_that_output_is_as_expected: doing '%s'" % cmd)
    res = os.system(cmd)
    log.debug("check_that_output_is_as_expected: res=%d" % res)
    log.critical()
    if res == 0:
        log.info('Simulation results are as expected.')
    else:
        log.critical('Simulation results are NOT as expected.')
        fd = open('compare_model_timeseries.stdout', 'r')
        cmp_error = fd.readlines()
        fd.close()
        log.critical(''.join(cmp_error))


def teardown():
    '''Clean up after validation run.'''

    log.debug('teardown: called')

    # remove remote directory and stdout capture file
    #shutil.rmtree(Remote_Data_Directory, ignore_errors=True)
    #try:
    #    os.remove(RUNMODEL_STDOUT)
    #except OSError:
    #    pass


################################################################################
# Mainline - run the simulation, check output.
################################################################################

# set logging levels
log.console_logging_level = log.INFO
log.log_logging_level = log.DEBUG

log.debug("Machine we are running on is '%s'" % get_host_name())
setup()

# prepare user for what is about to happen
log.critical('''
Please note that this validation test is accurate only on 64bit Linux or
Windows.  Running the validation on a 32bit operating system will result in
small differences in the generated mesh which defeat the simplistic test for
equality between the generated and expected SWW files.

This validation requires a working internet connection to refresh its files.
You may still run this validation without an internet connection if you have
the required files.

If you are behind a proxy server you will need to supply your proxy details
such as the proxy server address and your proxy username and password.  These
can be defined in one or more of the environment variables:
    HTTP_PROXY
    PROXY_USERNAME
    PROXY_PASSWORD
If they are not supplied in environment variables you will be prompted for
the information.
''')


# make sure local data is up to date
all_objects = Mandatory_Data_Objects + Optional_Data_Objects
if not refresh_local_data(all_objects, Local_Data_Directory, MIRRORS):
    if not can_we_run():
        log.critical("Can't refresh via the internet and you don't have the "
                     "required files.")
        log.critical('Terminating the validation.')
        log.critical('')
        log.critical('If you get the missing files from %s' % DATA_FILES_URL)
        log.critical('then you can try to run the validation again.  Put the '
                     'files into the directory')
        log.critical("%s." % Local_Data_Directory)
        sys.exit(10)

# now untar mandatory objects
for obj in Mandatory_Data_Objects:
    tar_path = os.path.join(Local_Data_Directory, obj)
    log.info('Untarring %s in directory %s ...'
             % (tar_path, Local_Data_Directory))
    untar_file(tar_path, target_dir=Local_Data_Directory)

# set required environment variables
set_environment()

# now run what simulations we can and check output is as expected
for odo in Optional_Data_Objects:
    start_time = time.time()

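    # e.g. 'patong.sww.TRIAL.tgz'.rsplit('.', 2) gives
    # ['patong.sww', 'TRIAL', 'tgz'], so vtype here becomes 'trial'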
    (_, vtype, _) = odo.rsplit('.', 2)
    vtype = vtype.lower()
    log.critical('#' * 72)
    log.critical("Running Patong '%s' validation ..." % vtype)
    if run_simulation(vtype, odo):
        # get SWW names expected and valid, check 'equal'
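        # e.g. for 'patong.sww.TRIAL.tgz': valid_sww='patong.sww.TRIAL'
        # and expected_sww='patong.sww'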
        (valid_sww, _) = odo.rsplit('.', 1)
        (expected_sww, _) = valid_sww.rsplit('.', 1)
        check_that_output_is_as_expected(expected_sww, valid_sww)

    stop_time = time.time()
    log.critical("'%s' validation took %.1fs\n\n\n"
                 % (vtype, stop_time - start_time))

# clean up
log.critical('Tearing down ...')
teardown()