source: branches/numpy_anuga_validation/automated_validation_tests/patong_beach_validation/validate.py @ 6907

Last change on this file since 6907 was 6907, checked in by rwilson, 15 years ago

Put changes back before going to work.

  • Property svn:executable set to *
File size: 14.7 KB
Line 
1'''
2Automatic verification that the ANUGA code runs the Patong simulation
3and produces the expected output.
4
5Required files are downloaded from the ANUGA servers if they are
6out of date or missing.
7'''
8
9import sys
10import os
11import glob
12import unittest
13import time
14import shutil
15
16from anuga.utilities.system_tools import get_web_file, untar_file, file_length
17import anuga.utilities.log as log
18
19
20# sourceforge download mirror hosts (must end with '/')
21# try these in turn for each file
22## NOTE: It would be more reliable if we could somehow 'poll' Sourceforge
23##       for a list of mirrors instead of hard-coding a list here.  That may
24##       be too difficult, as we only get the chance to select a mirror when
25##       actually downloading a file.
26MIRRORS = ['http://transact.dl.sourceforge.net/sourceforge/anuga/',       # au
27           'http://voxel.dl.sourceforge.net/sourceforge/anuga/',          # us
28           'http://superb-west.dl.sourceforge.net/sourceforge/anuga/',    # us
29           'http://jaist.dl.sourceforge.net/sourceforge/anuga/',          # jp
30           'http://dfn.dl.sourceforge.net/sourceforge/anuga/'             # de
31          ]
32##MIRRORS = ['http://10.7.64.243/patong_validation_data/']       # local linux box
33
34# URL to hand-get data files, if required
35DATA_FILES_URL = 'http://sourceforge.net/project/showfiles.php?group_id=172848'
36
37# sequence of mandatory local data objects
38Mandatory_Data_Objects = ('data.tgz',)
39
40# sequence of optional local data objects.
41# these names must be of the form <scene>.sww.<type>.tgz
42# as code below depends upon it.
43Optional_Data_Objects = ('patong.sww.TRIAL.tgz',
44                         'patong.sww.BASIC.tgz',
45                         'patong.sww.FINAL.tgz'
46                        )
47
48# path to the local data directory
49Local_Data_Directory = 'local_data'
50
51# path to the remote data directory
52Remote_Data_Directory = 'remote_data'
53
54# name of stdout catch file for runmodel.py
55RUNMODEL_STDOUT = 'runmodel.stdout'
56
57# text at start of 'output dir' line in RUNMODEL_STDOUT file
58OUTDIR_PREFIX = 'Make directory '
59
60# Name of SWW file produced by run_model.py
61OUTPUT_SWW = 'patong.sww'
62
63
def setup():
    '''Prepare the environment for the validation run.

    Currently a placeholder: the data checks happen later via
    refresh_local_data() and can_we_run().
    '''

    return None
71
72
def refresh_local_data(data_objects, target_dir, mirrors):
    '''Update local data objects from the server.

    data_objects:   list of files to refresh
    target_dir:     directory in which to put files
    mirrors:        list of mirror sites to use

    Each file has an associated *.digest file used to decide
    if the local file needs refreshing.

    Return True if all went well, else False.

    NOTE(review): 'target_dir' is accepted but the module-level
    Local_Data_Directory is used instead - confirm intent with callers.
    '''

    # decision function to decide if a file contains HTML
    def is_html(filename):
        '''Decide if given file contains HTML.

        Mirrors answer failed requests with an HTML error page, so an
        HTML response means we did NOT get the data file.
        '''

        # read only the first 1KB - enough to spot a DOCTYPE declaration
        fd = open(filename)
        try:
            data = fd.read(1024)
        finally:
            fd.close()      # close even if the read raises

        return 'DOCTYPE' in data

    # local function to get remote file from one of mirrors
    def get_remote_from_mirrors(remote, local, auth, mirrors):
        '''Get 'remote' from one of 'mirrors', put in 'local'.

        Return (True, <updated_auth>) on success, else (False, <updated_auth>).
        '''

        # Get a unique date+time string to defeat caching.  The idea is to add
        # this to the end of any URL so proxy sees a different request.
        cache_defeat = '?' + time.strftime('%Y%m%d%H%M%S')

        # try each mirror in turn until one delivers a non-HTML file
        for mirror in mirrors:
            log.debug('Fetching remote file %s from mirror %s'
                      % (remote, mirror))

            remote_url = mirror + remote + cache_defeat
            (result, auth) = get_web_file(remote_url, local, auth=auth)
            if result and not is_html(local):
                log.debug('Success fetching file %s' % remote)
                return (True, auth)
            log.debug('Failure fetching from %s' % mirror)

        log.debug('Failure fetching file %s' % remote)
        return (False, auth)

    # local function to compare contents of two files
    def files_same(file_a, file_b):
        '''Return True if the two files have identical contents.'''

        fd = open(file_a, 'r')
        try:
            data_a = fd.read()
        finally:
            fd.close()

        fd = open(file_b, 'r')
        try:
            data_b = fd.read()
        finally:
            fd.close()

        return data_a == data_b

    # local function to update one data object
    def refresh_object(obj, auth, mirrors):
        '''Update object 'obj' using authentication tuple 'auth'.

        Return (True, <updated_auth>) if all went well,
        else (False, <updated_auth>).
        '''

        # create local and remote file paths.
        obj_digest = obj + '.digest'

        remote_file = os.path.join(Remote_Data_Directory, obj)
        remote_digest = remote_file + '.digest'

        local_file = os.path.join(Local_Data_Directory, obj)
        local_digest = local_file + '.digest'

        # see if missing either digest or object .tgz
        if not os.path.exists(local_digest) or not os.path.exists(local_file):
            # no digest or no object, download both digest and object
            (res, auth) = get_remote_from_mirrors(obj_digest, local_digest,
                                                  auth, mirrors)
            if res:
                (res, auth) = get_remote_from_mirrors(obj, local_file,
                                                      auth, mirrors)
        else:
            # have both locally; fetch digest to scratch area and refresh
            # the object only if the digests differ
            (res, auth) = get_remote_from_mirrors(obj_digest, remote_digest,
                                                  auth, mirrors)
            if res:
                if not files_same(local_digest, remote_digest):
                    # digests differ, refresh object
                    shutil.move(remote_digest, local_digest)
                    (res, auth) = get_remote_from_mirrors(obj, local_file,
                                                          auth, mirrors)

        return (res, auth)

    # create local data directory if required
    log.debug('Creating local directory: %s' % Local_Data_Directory)
    if not os.path.exists(Local_Data_Directory):
        os.mkdir(Local_Data_Directory)

    # clean out remote data copy directory
    log.debug('Cleaning remote directory: %s' % Remote_Data_Directory)
    shutil.rmtree(Remote_Data_Directory, ignore_errors=True)
    os.mkdir(Remote_Data_Directory)

    # refresh each data object in turn, threading 'auth' through so the
    # user is prompted for proxy credentials at most once
    auth = None
    result = True
    for data_object in data_objects:
        log.info("Refreshing file '%s'" % data_object)
        log.debug('refresh_local_data: getting %s from mirrors, auth=%s'
                  % (data_object, str(auth)))
        (res, auth) = refresh_object(data_object, auth, mirrors)
        log.debug('refresh_local_data: returned (res,auth)=%s,%s'
                  % (str(res), str(auth)))
        if not res:
            log.info('Refresh of file %s failed.' % data_object)
            result = False

    if result:
        log.info('Local data has been refreshed.')
    else:
        log.info('Local data has been refreshed, with one or more errors.')
    return result
202
203
def can_we_run():
    '''Decide if we can run with the files we have.

    Return True if we *can* run, else False.

    Reports the status of each data file to the user.  Note: this
    function only checks for existence; untarring is done by the caller.
    '''

    log.critical('Checking if you have the required files to run:')

    # width of the longest object name, for aligned status output
    # (the 'or [0]' guards against both tuples being empty)
    all_objects = Mandatory_Data_Objects + Optional_Data_Objects
    max_width = max([len(obj) for obj in all_objects] or [0])

    # if we don't have *all* mandatory objects, we can't run
    have_mandatory_files = True
    for obj in Mandatory_Data_Objects:
        obj_path = os.path.join(Local_Data_Directory, obj)
        if os.path.exists(obj_path):
            log.info('\t%s  found' % obj.ljust(max_width))
        else:
            log.info('\t%s  MISSING AND REQUIRED!' % obj.ljust(max_width))
            have_mandatory_files = False

    # at least *one* optional object must exist
    have_optional_files = False
    for obj in Optional_Data_Objects:
        obj_path = os.path.join(Local_Data_Directory, obj)
        if os.path.exists(obj_path):
            have_optional_files = True
            log.info('\t%s  found' % obj.ljust(max_width))
        else:
            log.info('\t%s  MISSING!' % obj.ljust(max_width))

    if not have_mandatory_files or not have_optional_files:
        log.critical('You must obtain the missing files before you can run '
                     'this validation.')
        return False

    log.critical('You have the required files.')

    return True
249
250
def set_environment():
    '''Point the ANUGA environment variables at the local data area.'''

    # trailing '' makes join() append a path separator: 'local_data/'
    os.environ['INUNDATIONHOME'] = os.path.join(Local_Data_Directory, '')
    os.environ['MUXHOME'] = os.path.join(Local_Data_Directory, 'data')
257
258
def run_simulation(vtype, sim_obj):
    '''Run a simulation.

    vtype    validation run type (substituted into project.py)
    sim_obj  name of the tarred data object for this run

    Returns True if all went well, else False.
    '''

    # unpack the data object into the local data directory
    tar_path = os.path.join(Local_Data_Directory, sim_obj)
    log.info('Untarring %s in directory %s ...'
             % (tar_path, Local_Data_Directory))
    untar_file(tar_path, target_dir=Local_Data_Directory)

    # instantiate project.py from its template, substituting the run type
    log.info('Creating %s version of project.py' % vtype)
    fd = open('project.py.template', 'r')
    template = fd.read()
    fd.close()

    fd = open('project.py', 'w')
    fd.write(template.replace('#!SETUP!#', vtype.lower()))
    fd.close()

    # import the newly-written project.py
    import project

    # run the simulation, producing the SWW file
    log.info('Running run_model.py')
    cmd = 'python run_model.py > %s' % RUNMODEL_STDOUT
    log.debug("run_simulation: doing '%s'" % cmd)
    res = os.system(cmd)
    log.debug("run_simulation: res=%d" % res)

    # drop our reference to project.py
    # (NOTE: the module itself stays cached in sys.modules)
    del project

    # a non-zero status from os.system() means the run failed
    if res != 0:
        log.critical('Simulation failed, check log')

    return res == 0
303
def check_that_output_is_as_expected(expected_sww, valid_sww):
    '''Check that validation output is as required.

    expected_sww  name of the SWW file the simulation should have produced
    valid_sww     name of the known-good SWW file in the local data directory

    Returns 1 on setup error, else None; comparison differences are
    reported through the log only.
    '''

    # get path to expected SWW file
    log.critical('Checking that simulation results are as expected ...')
    local_sww = os.path.join(Local_Data_Directory, valid_sww)

    # get output directory from stdout capture file
    try:
        fd = open(RUNMODEL_STDOUT, 'r')
    except IOError as e:
        log.critical("Can't open catch file '%s': %s"
                     % (RUNMODEL_STDOUT, str(e)))
        return 1
    try:
        lines = fd.readlines()
    finally:
        fd.close()      # BUG FIX: was 'fd.close' - attribute access, no call

    # the run_model.py output names its output directory on a line
    # starting with OUTDIR_PREFIX
    output_directory = None
    for line in lines:
        if line.startswith(OUTDIR_PREFIX):
            output_directory = line.replace(OUTDIR_PREFIX, '', 1)
            output_directory = output_directory.strip('\n')
            break
    if output_directory is None:
        log.critical("Couldn't find line starting with '%s' in file '%s'"
                     % (OUTDIR_PREFIX, RUNMODEL_STDOUT))
        return 1

    log.debug('check_that_output_is_as_expected: output_directory=%s'
              % output_directory)

    # compare SWW files here and there
    new_output_sww = os.path.join(output_directory, expected_sww)
    cmd = 'python cmpsww.py %s %s > cmpsww.stdout' % (local_sww, new_output_sww)
    log.debug("check_that_output_is_as_expected: doing '%s'" % cmd)
    res = os.system(cmd)
    log.debug("check_that_output_is_as_expected: res=%d" % res)
    if res == 0:
        log.info('Simulation results are as expected.')
    else:
        log.critical('Simulation results are NOT as expected.')
        fd = open('cmpsww.stdout', 'r')
        try:
            cmp_error = fd.readlines()
        finally:
            fd.close()
        log.critical('\n' + ''.join(cmp_error))
349
350
def teardown():
    '''Clean up after a validation run.

    Removes the remote scratch directory and the stdout capture file.
    Local data files are deliberately left in place for the next run.
    '''

    log.debug('teardown: called')

    shutil.rmtree(Remote_Data_Directory, ignore_errors=True)
    try:
        os.remove(RUNMODEL_STDOUT)
    except OSError:
        pass        # capture file may not exist - that's fine
370           
371
372################################################################################
373# Mainline - run the simulation, check output.
374################################################################################
375
376# set logging levels
377log.console_logging_level = log.INFO
378log.log_logging_level = log.DEBUG
379log_filename = log.log_filename
380
381setup()
382
383# prepare user for what is about to happen
384
385msg = '''
386This validation requires a working internet connection to refresh its files.
387You may still run this validation without an internet connection if you have the
388required files.
389
390If you are behind a proxy server you will need to supply your proxy details
391such as the proxy server address and your proxy username and password.  These
392can be defined in one or more of the environment variables:
393    HTTP_PROXY
394    PROXY_USERNAME
395    PROXY_PASSWORD
396if you wish.  If not supplied in environment variables you will be prompted for
397the information.
398'''
399
400log.critical(msg)
401
402# make sure local data is up to date
403all_objects = Mandatory_Data_Objects + Optional_Data_Objects
404if not refresh_local_data(all_objects, Local_Data_Directory, MIRRORS):
405    if not can_we_run():
406        log.critical("Can't refresh via the internet and you don't have the "
407                     "required files.")
408        log.critical('Terminating the validation.')
409        log.critical('')
410        log.critical('If you get the missing files from %s' % DATA_FILES_URL)
411        log.critical('then you can try to run the validation again.  Put the '
412                     'files into the directory')
413        log.critical("%s." % Local_Data_Directory)
414        sys.exit(10)
415
416# now untar mandatory objects
417for obj in Mandatory_Data_Objects:
418    tar_path = os.path.join(Local_Data_Directory, obj)
419    log.info('Untarring %s in directory %s ...'
420             % (tar_path, Local_Data_Directory))
421    untar_file(tar_path, target_dir=Local_Data_Directory)
422
423# set required environment variables
424set_environment()
425
426# now run what simulations we can and check output is as expected
427for odo in Optional_Data_Objects:
428    (_, vtype, _) = odo.rsplit('.', 2)
429    vtype = vtype.lower()
430    log.critical('#' * 50)
431    log.critical("Running Patong '%s' validation ..." % vtype)
432    if run_simulation(vtype, odo):
433        # get SWW names expected and valid, check 'equal'
434        (valid_sww, _) = odo.rsplit('.', 1)
435        (expected_sww, _) = valid_sww.rsplit('.', 1)
436        check_that_output_is_as_expected(expected_sww, valid_sww)
437    shutil.move(log_filename, '%s.%s' % (log_filename, vtype))
438
439# clean up
440teardown()
Note: See TracBrowser for help on using the repository browser.