source: anuga_validation/automated_validation_tests/patong_beach_validation/validate.py @ 6903

Last change on this file since 6903 was 6903, checked in by rwilson, 15 years ago

Ongoing changes with Patong validation.

  • Property svn:executable set to *
File size: 14.4 KB
Line 
1'''
2Automatic verification that the ANUGA code runs the Patong simulation
3and produces the expected output.
4
5Required files are downloaded from the ANUGA servers if they are
6out of date or missing.
7'''
8
9import sys
10import os
11import glob
12import unittest
13import time
14import shutil
15
16from anuga.utilities.system_tools import get_web_file, untar_file, file_length
17import anuga.utilities.log as log
18
19
# sourceforge download mirror hosts (must end with '/')
# try these in turn for each file
## NOTE: It would be more reliable if we could somehow 'poll' Sourceforge
##       for a list of mirrors instead of hard-coding a list here.  That may
##       be too difficult, as we only get the chance to select a mirror when
##       actually downloading a file.
MIRRORS = ['http://transact.dl.sourceforge.net/sourceforge/anuga/',       # au
           'http://voxel.dl.sourceforge.net/sourceforge/anuga/',          # us
           'http://superb-west.dl.sourceforge.net/sourceforge/anuga/',    # us
           'http://jaist.dl.sourceforge.net/sourceforge/anuga/',          # jp
           'http://dfn.dl.sourceforge.net/sourceforge/anuga/'             # de
          ]
##MIRRORS = ['http://10.7.64.243/patong_validation_data/']       # local linux box

# URL the user can visit to hand-get data files, if automatic refresh fails
DATA_FILES_URL = 'http://sourceforge.net/project/showfiles.php?group_id=172848'

# sequence of mandatory local data objects (every one must be present to run)
Mandatory_Data_Objects = ('data.tgz',)

# sequence of optional local data objects (at least one must be present).
# these names must be of the form <scene>.sww.<type>.tgz
# as code below depends upon it.
Optional_Data_Objects = ('patong.sww.TRIAL.tgz',
                         'patong.sww.BASIC.tgz',
                         'patong.sww.FINAL.tgz'
                        )

# path to the local data directory (refreshed data objects live here)
Local_Data_Directory = 'local_data'

# path to the remote data directory (scratch area for freshly fetched digests)
Remote_Data_Directory = 'remote_data'

# name of stdout catch file for runmodel.py
RUNMODEL_STDOUT = 'runmodel.stdout'

# text at start of 'output dir' line in RUNMODEL_STDOUT file
OUTDIR_PREFIX = 'Make directory '

# Name of SWW file produced by simulation
OUTPUT_SWW = 'patong.sww'
63
def setup():
    '''Prepare for the validation run.

    Placeholder for checking that required data is set in project.py;
    currently performs no work.
    '''

    pass
71
72
def refresh_local_data(data_objects, target_dir, mirrors):
    '''Update local data objects from the server.

    data_objects:   list of files to refresh
    target_dir:     directory in which to put files
    mirrors:        list of mirror sites to use

    Each file has an associated *.digest file used to decide
    if the local file needs refreshing.

    Return True if all went well, else False.

    NOTE(review): 'target_dir' is accepted but unused; the module-level
    Local_Data_Directory is used instead -- confirm this is intended.
    '''

    # decision function to decide if a file contains HTML
    def is_html(filename):
        '''Return True if the given file looks like HTML.

        Mirrors sometimes return an HTML error page instead of the
        requested file; a 'DOCTYPE' marker in the first 1K bytes is
        taken as evidence of that.
        '''

        fd = open(filename)
        data = fd.read(1024)
        fd.close()

        # BUGFIX: the test was previously inverted (returned True when
        # 'DOCTYPE' was ABSENT), so HTML error pages from a mirror were
        # accepted as successful downloads.
        return 'DOCTYPE' in data

    # local function to get remote file from one of mirrors
    def get_remote_from_mirrors(remote, local, auth, mirrors):
        '''Get 'remote' from one of 'mirrors', put in 'local'.

        Return (True, auth) on success, else (False, auth), where 'auth'
        is the possibly updated authentication tuple.
        '''

        # Get a unique date+time string to defeat caching.  The idea is to add
        # this to the end of any URL so proxy sees a different request.
        cache_defeat = '?' + time.strftime('%Y%m%d%H%M%S')

        # try each mirror when getting file
        for mirror in mirrors:
            log.debug('Fetching remote file %s from mirror %s'
                      % (remote, mirror))

            remote_url = mirror + remote + cache_defeat
            (result, auth) = get_web_file(remote_url, local, auth=auth)
            # a 'successful' fetch that delivered an HTML page is a failure
            if result and not is_html(local):
                log.debug('Success fetching file %s' % remote_url)
                return (True, auth)

        return (False, auth)

    # local function to compare contents of two files
    def files_same(file_a, file_b):
        '''Return True if the two files have identical contents.'''

        fd = open(file_a, 'r')
        data_a = fd.read()
        fd.close()

        fd = open(file_b, 'r')
        data_b = fd.read()
        fd.close()

        return data_a == data_b

    # local function to update one data object
    def refresh_object(obj, auth, mirrors):
        '''Update object 'obj' using authentication tuple 'auth'.

        Return (True, <updated_auth>) if all went well,
        else (False, <updated_auth>).
        '''

        # create local and remote file paths.
        obj_digest = obj + '.digest'

        remote_file = os.path.join(Remote_Data_Directory, obj)
        remote_digest = remote_file + '.digest'

        local_file = os.path.join(Local_Data_Directory, obj)
        local_digest = local_file + '.digest'

        # see if missing either digest or object .tgz
        if not os.path.exists(local_digest) or not os.path.exists(local_file):
            # no digest or no object, download both digest and object
            (res, auth) = get_remote_from_mirrors(obj_digest, local_digest,
                                                  auth, mirrors)
            if res:
                (res, auth) = get_remote_from_mirrors(obj, local_file,
                                                      auth, mirrors)
        else:
            # download object digest to remote data directory
            (res, auth) = get_remote_from_mirrors(obj_digest, remote_digest,
                                                  auth, mirrors)
            if res:
                # BUGFIX: previously compared the undefined names
                # 'local_data_digest' and 'remote_data_digest', raising
                # NameError whenever this refresh path was taken.
                if not files_same(local_digest, remote_digest):
                    # digests differ, refresh object
                    log.info('Local file %s is out of date, refreshing ...'
                             % obj)
                    shutil.move(remote_digest, local_digest)
                    (res, auth) = get_remote_from_mirrors(obj, local_file,
                                                          auth, mirrors)

        return (res, auth)

    # create local data directory if required
    log.debug('Creating local directory: %s' % Local_Data_Directory)
    if not os.path.exists(Local_Data_Directory):
        os.mkdir(Local_Data_Directory)

    # clean out remote data copy directory
    log.debug('Cleaning remote directory: %s' % Remote_Data_Directory)
    shutil.rmtree(Remote_Data_Directory, ignore_errors=True)
    os.mkdir(Remote_Data_Directory)

    # refresh each local file in turn; remember if any refresh failed
    auth = None
    result = True
    for data_object in data_objects:
        log.info("Refreshing file '%s'" % data_object)
        log.debug('refresh_local_data: getting %s from mirrors, auth=%s'
                  % (data_object, str(auth)))
        (res, auth) = refresh_object(data_object, auth, mirrors)
        log.debug('refresh_local_data: returned (res,auth)=%s,%s'
                  % (str(res), str(auth)))
        if not res:
            log.info('Refresh of file %s failed.' % data_object)
            result = False

    if result:
        log.info('Local data has been refreshed.')
    else:
        log.info('Local data has been refreshed, with one or more errors.')
    return result
201
202
def can_we_run():
    '''Decide if we can run with the files we have.

    Return True if we *can* run, else False.

    Tell user what is happening first, then untar files.
    '''

    log.critical('Checking if you have the required files to run:')

    # width of the longest object name, for aligned report output
    max_width = 0
    for name in Mandatory_Data_Objects + Optional_Data_Objects:
        if len(name) > max_width:
            max_width = len(name)

    # every mandatory object must be present, or we can't run
    have_mandatory_files = True
    for name in Mandatory_Data_Objects:
        if os.path.exists(os.path.join(Local_Data_Directory, name)):
            log.info('\t%s  found' % name.ljust(max_width))
        else:
            log.info('\t%s  MISSING AND REQUIRED!' % name.ljust(max_width))
            have_mandatory_files = False

    # at least one optional object must be present
    have_optional_files = False
    for name in Optional_Data_Objects:
        if os.path.exists(os.path.join(Local_Data_Directory, name)):
            have_optional_files = True
            log.info('\t%s  found' % name.ljust(max_width))
        else:
            log.info('\t%s  MISSING!' % name.ljust(max_width))

    if not (have_mandatory_files and have_optional_files):
        log.critical('You must obtain the missing files before you can run '
                     'this validation.')
        return False

    log.critical('You have the required files.')

    return True
248
249
def set_environment():
    '''Point ANUGA environment variables at the local data directory.'''
    # join with '' gives the path a trailing separator
    os.environ['INUNDATIONHOME'] = os.path.join(Local_Data_Directory, '')
    os.environ['MUXHOME'] = os.path.join(Local_Data_Directory, 'data')
256
257
def run_simulation(vtype, sim_obj):
    '''Run a simulation.

    vtype    validation type string substituted into project.py
    sim_obj  name of the tarred data object to unpack first

    Returns True if all went well, else False.
    '''

    # unpack the data object into the local data directory
    tar_path = os.path.join(Local_Data_Directory, sim_obj)
    log.info('Untarring %s in directory %s ...'
             % (tar_path, Local_Data_Directory))
    untar_file(tar_path, target_dir=Local_Data_Directory)

    # generate project.py from its template, substituting the setup type
    fd = open('project.py.template', 'r')
    template_lines = fd.readlines()
    fd.close()

    generated = [line.replace('#!SETUP!#', vtype.lower())
                 for line in template_lines]

    fd = open('project.py', 'w')
    fd.write(''.join(generated))
    fd.close()

    # import new project.py
    import project

    # run the simulation as a subprocess, capturing its stdout;
    # this produces the SWW file
    cmd = 'python run_model.py > %s' % RUNMODEL_STDOUT
    log.debug("run_simulation: doing '%s'" % cmd)
    res = os.system(cmd)
    log.debug("run_simulation: res=%d" % res)

    # 'unimport' project.py
    del project

    # check result
    if res != 0:
        log.critical('Simulation failed, check log')

    return res == 0
300
def check_that_output_is_as_expected(expected_sww, valid_sww):
    '''Check that validation output is as required.

    expected_sww  name of the SWW file the simulation should have produced
    valid_sww     name of the known-good SWW file in the local data directory

    Compares the generated SWW against the known-good copy by running
    cmpsww.py as a subprocess.  Returns 1 on setup errors (unreadable
    stdout capture file, or no output-directory line found in it);
    otherwise logs the comparison outcome and returns None.
    '''

    # get path to expected SWW file
    log.critical('Checking that simulation results are as expected ...')
    local_sww = os.path.join(Local_Data_Directory, valid_sww)

    # get output directory from stdout capture file
    try:
        fd = open(RUNMODEL_STDOUT, 'r')
    except IOError as e:        # 'as' form is valid in Python 2.6+ and 3.x
        log.critical("Can't open catch file '%s': %s"
                     % (RUNMODEL_STDOUT, str(e)))
        return 1
    lines = fd.readlines()
    # BUGFIX: was 'fd.close' (attribute access, not a call), so the file
    # was never actually closed here.
    fd.close()

    # scan for the line naming the simulation's output directory
    output_directory = None
    for line in lines:
        if line.startswith(OUTDIR_PREFIX):
            output_directory = line.replace(OUTDIR_PREFIX, '', 1)
            output_directory = output_directory.strip('\n')
            break
    if output_directory is None:
        log.critical("Couldn't find line starting with '%s' in file '%s'"
                     % (OUTDIR_PREFIX, RUNMODEL_STDOUT))
        return 1

    log.debug('check_that_output_is_as_expected: output_directory=%s'
              % output_directory)

    # compare SWW files here and there
    new_output_sww = os.path.join(output_directory, expected_sww)
    cmd = 'python cmpsww.py %s %s > cmpsww.stdout' % (local_sww, new_output_sww)
    log.debug("check_that_output_is_as_expected: doing '%s'" % cmd)
    res = os.system(cmd)
    log.debug("check_that_output_is_as_expected: res=%d" % res)
    if res == 0:
        log.info('Simulation results are as expected.')
    else:
        # comparison failed: surface cmpsww.py's output in the log
        log.critical('Simulation results are NOT as expected.')
        fd = open('cmpsww.stdout', 'r')
        cmp_error = fd.readlines()
        fd.close()
        log.critical('\n' + ''.join(cmp_error))
346
347
def teardown():
    '''Clean up after validation run.

    Removes the remote scratch directory and the stdout capture file;
    both may legitimately be absent.
    '''

    log.debug('teardown: called')

    # remote copy directory: ignore_errors covers the already-gone case
    shutil.rmtree(Remote_Data_Directory, ignore_errors=True)

    # stdout capture file: swallow the error if it doesn't exist
    try:
        os.remove(RUNMODEL_STDOUT)
    except OSError:
        pass
367           
368
################################################################################
# Mainline - run the simulation, check output.
################################################################################

# set logging levels: terse INFO on the console, full DEBUG to the log file
log.console_logging_level = log.INFO
log.log_logging_level = log.DEBUG
setup()

# prepare user for what is about to happen

msg = '''
This validation requires a working internet connection to refresh it's files.
You may still run this validation without an internet connection if you have the
required files.

If you are behind a proxy server you will need to supply your proxy details
such as the proxy server address and your proxy username and password.  These
can be defined in one or more of the environment variables:
    HTTP_PROXY
    PROXY_USERNAME
    PROXY_PASSWORD
if you wish.  If not supplied in environment variables you will be prompted for
the information.
'''

log.critical(msg)

# make sure local data is up to date; a failed refresh is still OK if the
# required files already exist locally (can_we_run() decides that)
all_objects = Mandatory_Data_Objects + Optional_Data_Objects
if not refresh_local_data(all_objects, Local_Data_Directory, MIRRORS):
    if not can_we_run():
        log.critical("Can't refresh via the internet and you don't have the "
                     "required files.")
        log.critical('Terminating the validation.')
        log.critical('')
        log.critical('If you get the missing files from %s' % DATA_FILES_URL)
        log.critical('then you can try to run the validation again.  Put the '
                     'files into the directory')
        log.critical("%s." % Local_Data_Directory)
        sys.exit(10)

# now untar mandatory objects
for obj in Mandatory_Data_Objects:
    tar_path = os.path.join(Local_Data_Directory, obj)
    log.info('Untarring %s in directory %s ...'
             % (tar_path, Local_Data_Directory))
    untar_file(tar_path, target_dir=Local_Data_Directory)

# set required environment variables
set_environment()

# now run what simulations we can and check output is as expected
for odo in Optional_Data_Objects:
    # '<scene>.sww.<TYPE>.tgz' -> middle component is the validation type
    (_, vtype, _) = odo.rsplit('.', 2)
    vtype = vtype.lower()
    log.critical("Running Patong '%s' validation ..." % vtype)
    if run_simulation(vtype, odo):
        # get SWW names expected and valid, check 'equal'
        # valid: '<scene>.sww.<TYPE>', expected: '<scene>.sww'
        (valid_sww, _) = odo.rsplit('.', 1)
        (expected_sww, _) = valid_sww.rsplit('.', 1)
        check_that_output_is_as_expected(expected_sww, valid_sww)

# clean up
teardown()
Note: See TracBrowser for help on using the repository browser.