source: branches/numpy_anuga_validation/automated_validation_tests/patong_beach_validation/validate.py @ 6927
'''
Automatic verification that the ANUGA code runs the Patong simulation
and produces the expected output.

Required files are downloaded from the ANUGA servers if they are
out of date or missing.
'''

import sys
import os
import glob
import unittest
import time
import shutil

from anuga.utilities.system_tools import get_web_file, untar_file, file_length
import anuga.utilities.log as log


# sourceforge download mirror hosts (must end with '/')
# try these in turn for each file
## NOTE: It would be more reliable if we could somehow 'poll' Sourceforge
##       for a list of mirrors instead of hard-coding a list here.  That may
##       be too difficult, as we only get the chance to select a mirror when
##       actually downloading a file.
#MIRRORS = ['http://transact.dl.sourceforge.net/sourceforge/anuga/',       # au
#           'http://voxel.dl.sourceforge.net/sourceforge/anuga/',          # us
#           'http://superb-west.dl.sourceforge.net/sourceforge/anuga/',    # us
#           'http://jaist.dl.sourceforge.net/sourceforge/anuga/',          # jp
#           'http://dfn.dl.sourceforge.net/sourceforge/anuga/'             # de
#          ]
MIRRORS = ['http://10.7.64.243/patong_validation_data/']       # local linux box

# URL from which to manually fetch the data files, if required
DATA_FILES_URL = 'http://sourceforge.net/project/showfiles.php?group_id=172848'

# sequence of mandatory local data objects
Mandatory_Data_Objects = ('data.tgz',)

# sequence of optional local data objects.
# these names must be of the form <scene>.sww.<type>.tgz
# as the code below depends on it (see the parsing example in the
# mainline at the bottom of this file).
Optional_Data_Objects = ('patong.sww.TRIAL.tgz',
                         'patong.sww.BASIC.tgz',
                         'patong.sww.FINAL.tgz'
                        )

# path to the local data directory
Local_Data_Directory = 'local_data'

# path to the remote data directory
Remote_Data_Directory = 'remote_data'

# name of stdout catch file for run_model.py
RUNMODEL_STDOUT = 'runmodel.stdout'

# text at start of 'output dir' line in RUNMODEL_STDOUT file
OUTDIR_PREFIX = 'Make directory '

# Name of SWW file produced by run_model.py
OUTPUT_SWW = 'patong.sww'


def setup():
    '''Prepare for the validation run.

    Check that we have the required data set in project.py.
    Currently a no-op placeholder.
    '''

    pass


def refresh_local_data(data_objects, target_dir, mirrors):
    '''Update local data objects from the server.

    data_objects:   list of files to refresh
    target_dir:     directory in which to put files
    mirrors:        list of mirror sites to use

    Each file has an associated *.digest file used to decide
    if the local file needs refreshing.

    Return True if all went well, else False.
    '''

    # decision function to decide if a file contains HTML
    def is_html(filename):
        '''Decide if given file contains HTML.'''

        fd = open(filename)
        data = fd.read(1024)
        fd.close()

        return 'DOCTYPE' in data

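    # Note: get_remote_from_mirrors() below treats a fetched file that
    # contains HTML as a failed download -- a mirror will typically answer
    # a bad request with an HTML error page rather than the requested file.
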
    # local function to get remote file from one of mirrors
    def get_remote_from_mirrors(remote, local, auth, mirrors):
        '''Get 'remote' from one of 'mirrors', put in 'local'.'''

        # Get a unique date+time string to defeat caching.  The idea is to add
        # this to the end of any URL so the proxy sees a different request.
        cache_defeat = '?' + time.strftime('%Y%m%d%H%M%S')
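        # e.g. 'data.tgz' would be requested as 'data.tgz?20090617104523'
        # (the timestamp shown is illustrative)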
107
108        # try each mirror when getting file
109        for mirror in mirrors:
110            log.debug('Fetching remote file %s from mirror %s'
111                      % (remote, mirror))
112
113            remote_url = mirror + remote + cache_defeat
114            (result, auth) = get_web_file(remote_url, local, auth=auth)
115            if result and is_html(local)==False:
116                log.debug('Success fetching file %s' % remote)
117                return (True, auth)
118            log.debug('Failure fetching from %s' % mirror)
119
120        log.debug('Failure fetching file %s' % remote)
121        return (False, auth)           
122               
123
124    # local function to compare contents of two files
125    def files_same(file_a, file_b):
126        '''Compare two files to see if contents are the same.'''
127       
128        fd = open(file_a, 'r')
129        data_a = fd.read()
130        fd.close()
131
132        fd = open(file_b, 'r')
133        data_b = fd.read()
134        fd.close()
135
136        return data_a == data_b
137
138       
139    # local function to update one data object
140    def refresh_object(obj, auth, mirrors):
141        '''Update object 'obj' using authentication tuple 'auth'.
142       
143        Return (True, <updated_auth>) if all went well,
144        else (False, <updated_auth>).
145        '''
146
147        # create local and remote file paths.
148        obj_digest = obj + '.digest'
149       
150        remote_file = os.path.join(Remote_Data_Directory, obj)
151        remote_digest = remote_file + '.digest'
152       
153        local_file = os.path.join(Local_Data_Directory, obj)
154        local_digest = local_file + '.digest'
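        # e.g. for obj 'data.tgz' these are local_data/data.tgz,
        # local_data/data.tgz.digest, remote_data/data.tgz and
        # remote_data/data.tgz.digest (POSIX separators shown)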

        # see if either the digest or the object .tgz is missing
        if not os.path.exists(local_digest) or not os.path.exists(local_file):
            # no digest or no object, download both digest and object
            (res, auth) = get_remote_from_mirrors(obj_digest, local_digest, auth, mirrors)
            if res:
                (res, auth) = get_remote_from_mirrors(obj, local_file, auth, mirrors)
        else:
            # download object digest to remote data directory
            (res, auth) = get_remote_from_mirrors(obj_digest, remote_digest, auth, mirrors)
            if res:
                if not files_same(local_digest, remote_digest):
                    # digests differ, refresh object
                    shutil.move(remote_digest, local_digest)
                    (res, auth) = get_remote_from_mirrors(obj, local_file, auth, mirrors)

        return (res, auth)

    # create local data directory if required
    log.debug('Creating local directory: %s' % Local_Data_Directory)
    if not os.path.exists(Local_Data_Directory):
        os.mkdir(Local_Data_Directory)

    # clean out remote data copy directory
    log.debug('Cleaning remote directory: %s' % Remote_Data_Directory)
    shutil.rmtree(Remote_Data_Directory, ignore_errors=True)
    os.mkdir(Remote_Data_Directory)

    # refresh the local files
    auth = None
    result = True
    for data_object in data_objects:
        log.info("Refreshing file '%s'" % data_object)
        log.debug('refresh_local_data: getting %s from mirrors, auth=%s'
                  % (data_object, str(auth)))
        (res, auth) = refresh_object(data_object, auth, mirrors)
        log.debug('refresh_local_data: returned (res,auth)=%s,%s'
                  % (str(res), str(auth)))
        if not res:
            log.info('Refresh of file %s failed.' % data_object)
            result = False

    if result:
        log.critical('Local data has been refreshed.')
    else:
        log.critical('Local data has been refreshed, with one or more errors.')
    log.critical()
    return result


def can_we_run():
    '''Decide if we can run with the files we have.

    Return True if we *can* run, else False.

    Tell the user which files were found and which are missing.
    '''

    log.critical('Checking if you have the required files to run:')

    # get max width of object name string
    max_width = 0
    for obj in Mandatory_Data_Objects:
        max_width = max(len(obj), max_width)
    for obj in Optional_Data_Objects:
        max_width = max(len(obj), max_width)

    # if we don't have *all* mandatory objects, we can't run
    have_mandatory_files = True
    for obj in Mandatory_Data_Objects:
        obj_path = os.path.join(Local_Data_Directory, obj)
        if os.path.exists(obj_path):
            log.info('\t%s  found' % obj.ljust(max_width))
        else:
            log.info('\t%s  MISSING AND REQUIRED!' % obj.ljust(max_width))
            have_mandatory_files = False

    # at least *one* of the optional objects must exist
    have_optional_files = False
    for obj in Optional_Data_Objects:
        obj_path = os.path.join(Local_Data_Directory, obj)
        if os.path.exists(obj_path):
            have_optional_files = True
            log.info('\t%s  found' % obj.ljust(max_width))
        else:
            log.info('\t%s  MISSING!' % obj.ljust(max_width))

    if not have_mandatory_files or not have_optional_files:
        log.critical('You must obtain the missing files before you can run '
                     'this validation.')
        return False

    log.critical('You have enough required files to run.')
    log.critical()

    return True


def set_environment():
    # modify environment so we use the local data
    new_inundationhome = os.path.join(Local_Data_Directory, '')
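    # (joining with '' leaves a trailing separator, i.e. 'local_data' + os.sep)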
    os.environ['INUNDATIONHOME'] = new_inundationhome
    new_muxhome = os.path.join(Local_Data_Directory, 'data')
    os.environ['MUXHOME'] = new_muxhome


def run_simulation(vtype, sim_obj):
    '''Run a simulation.

    Returns True if all went well, else False.
    '''

    # untar the object
    tar_path = os.path.join(Local_Data_Directory, sim_obj)
    log.info('Untarring %s in directory %s ...'
             % (tar_path, Local_Data_Directory))
    untar_file(tar_path, target_dir=Local_Data_Directory)

    # modify project.py template
    log.debug("Creating '%s' version of project.py" % vtype)
    fd = open('project.py.template', 'r')
    project = fd.readlines()
    fd.close()

    new_project = []
    for line in project:
        new_project.append(line.replace('#!SETUP!#', vtype.lower()))

    fd = open('project.py', 'w')
    fd.write(''.join(new_project))
    fd.close()
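
    # A sketch of the substitution above, assuming the template contains a
    # line such as
    #     setup = '#!SETUP!#'
    # then for vtype 'TRIAL' the generated project.py would contain
    #     setup = 'trial'
    # (the sample line is illustrative; only the '#!SETUP!#' token and the
    # vtype.lower() replacement come from this file)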

    # import new project.py
    import project

    # run the simulation, produce SWW file
    log.info('Running the simulation ...')
    cmd = 'python run_model.py > %s' % RUNMODEL_STDOUT
    log.debug("run_simulation: doing '%s'" % cmd)
    res = os.system(cmd)
    log.debug("run_simulation: res=%d" % res)

    # 'unimport' project.py
    del project
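    # (note: this only removes the local name; the module object stays in
    # sys.modules, so a later 'import project' in this process would get the
    # cached module, not re-read the rewritten file.  The simulation itself is
    # unaffected because run_model.py runs in its own python process.)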

    # check result
    if res != 0:
        log.critical('Simulation failed, check log')

    return res == 0


def check_that_output_is_as_expected(expected_sww, valid_sww):
    '''Check that validation output is as required.'''

    # get path to expected SWW file
    log.critical('Checking that simulation results are as expected ...')
    local_sww = os.path.join(Local_Data_Directory, valid_sww)

    # get output directory from stdout capture file
    try:
        fd = open(RUNMODEL_STDOUT, 'r')
    except IOError, e:
        log.critical("Can't open catch file '%s': %s"
                     % (RUNMODEL_STDOUT, str(e)))
        return 1
    lines = fd.readlines()
    fd.close()

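    # Scan the captured stdout for the 'Make directory <path>' line that
    # run_model.py is expected to print; everything after the prefix is
    # taken as the simulation output directory.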
    output_directory = None
    for line in lines:
        if line.startswith(OUTDIR_PREFIX):
            output_directory = line.replace(OUTDIR_PREFIX, '', 1)
            output_directory = output_directory.strip('\n')
            break
    if output_directory is None:
        log.critical("Couldn't find line starting with '%s' in file '%s'"
                     % (OUTDIR_PREFIX, RUNMODEL_STDOUT))
        return 1

    log.debug('check_that_output_is_as_expected: output_directory=%s'
              % output_directory)

    # compare SWW files here and there
    new_output_sww = os.path.join(output_directory, expected_sww)
    cmd = 'python cmpsww.py %s %s > cmpsww.stdout' % (local_sww, new_output_sww)
    log.debug("check_that_output_is_as_expected: doing '%s'" % cmd)
    res = os.system(cmd)
    log.debug("check_that_output_is_as_expected: res=%d" % res)
    log.critical()
    if res == 0:
        log.info('Simulation results are as expected.')
    else:
        log.critical('Simulation results are NOT as expected.')
        fd = open('cmpsww.stdout', 'r')
        cmp_error = fd.readlines()
        fd.close()
        log.critical(''.join(cmp_error))


def teardown():
    '''Clean up after a validation run.'''

    log.debug('teardown: called')

    # remove remote directory and stdout capture file
    shutil.rmtree(Remote_Data_Directory, ignore_errors=True)
    try:
        os.remove(RUNMODEL_STDOUT)
    except OSError:
        pass


################################################################################
# Mainline - run the simulation, check output.
################################################################################

# set logging levels
log.console_logging_level = log.INFO
log.log_logging_level = log.DEBUG
log_filename = log.log_filename

setup()

# prepare user for what is about to happen

msg = '''
This validation requires a working internet connection to refresh its files.
You may still run this validation without an internet connection if you have
the required files.

If you are behind a proxy server you will need to supply your proxy details,
such as the proxy server address and your proxy username and password.  These
can be defined in one or more of the environment variables:
    HTTP_PROXY
    PROXY_USERNAME
    PROXY_PASSWORD
if you wish.  If not supplied in environment variables you will be prompted for
the information.
'''

log.critical(msg)

# make sure local data is up to date
all_objects = Mandatory_Data_Objects + Optional_Data_Objects
if not refresh_local_data(all_objects, Local_Data_Directory, MIRRORS):
    if not can_we_run():
        log.critical("Can't refresh via the internet and you don't have the "
                     "required files.")
        log.critical('Terminating the validation.')
        log.critical('')
        log.critical('If you get the missing files from %s' % DATA_FILES_URL)
        log.critical('then you can try to run the validation again.  Put the '
                     'files into the directory')
        log.critical("%s." % Local_Data_Directory)
        sys.exit(10)

# now untar mandatory objects
for obj in Mandatory_Data_Objects:
    tar_path = os.path.join(Local_Data_Directory, obj)
    log.info('Untarring %s in directory %s ...'
             % (tar_path, Local_Data_Directory))
    untar_file(tar_path, target_dir=Local_Data_Directory)

# set required environment variables
set_environment()

# now run what simulations we can and check output is as expected
for odo in Optional_Data_Objects:
    start_time = time.time()
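    # naming convention from the top of this file is <scene>.sww.<type>.tgz,
    # e.g. 'patong.sww.TRIAL.tgz'.rsplit('.', 2) -> ['patong.sww', 'TRIAL', 'tgz'],
    # so vtype here becomes 'trial'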
    (_, vtype, _) = odo.rsplit('.', 2)
    vtype = vtype.lower()
    log.critical('#' * 72)
    log.critical("Running Patong '%s' validation ..." % vtype)
    if run_simulation(vtype, odo):
        # get SWW names expected and valid, check 'equal'
        (valid_sww, _) = odo.rsplit('.', 1)
        (expected_sww, _) = valid_sww.rsplit('.', 1)
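        # e.g. for 'patong.sww.TRIAL.tgz': valid_sww is 'patong.sww.TRIAL'
        # (the reference result, presumably extracted from the .tgz above) and
        # expected_sww is 'patong.sww' (the file the simulation just produced)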
        check_that_output_is_as_expected(expected_sww, valid_sww)
    shutil.move(log_filename, '%s.%s' % (log_filename, vtype))
    stop_time = time.time()
    log.critical("'%s' validation took %.1fs" % (vtype, stop_time - start_time))

# clean up
teardown()