source: branches/numpy_anuga_validation/automated_validation_tests/patong_beach_validation/validate.py @ 6912

Last change on this file since 6912 was 6912, checked in by rwilson, 15 years ago

Changes to debugging level of many statements.

  • Property svn:executable set to *
File size: 14.5 KB
Line 
1'''
2Automatic verification that the ANUGA code runs the Patong simulation
3and produces the expected output.
4
5Required files are downloaded from the ANUGA servers if they are
6out of date or missing.
7'''
8
9import sys
10import os
11import glob
12import unittest
13import time
14import shutil
15
16from anuga.utilities.system_tools import get_web_file, untar_file, file_length
17import anuga.utilities.log as log
18
19
# sourceforge download mirror hosts (must end with '/')
# try these in turn for each file
## NOTE: It would be more reliable if we could somehow 'poll' Sourceforge
##       for a list of mirrors instead of hard-coding a list here.  That may
##       be too difficult, as we only get the chance to select a mirror when
##       actually downloading a file.
MIRRORS = ['http://transact.dl.sourceforge.net/sourceforge/anuga/',       # au
           'http://voxel.dl.sourceforge.net/sourceforge/anuga/',          # us
           'http://superb-west.dl.sourceforge.net/sourceforge/anuga/',    # us
           'http://jaist.dl.sourceforge.net/sourceforge/anuga/',          # jp
           'http://dfn.dl.sourceforge.net/sourceforge/anuga/'             # de
          ]
##MIRRORS = ['http://10.7.64.243/patong_validation_data/']       # local linux box

# URL to hand-get data files, if required (fallback for the user when
# automatic refresh from the mirrors fails)
DATA_FILES_URL = 'http://sourceforge.net/project/showfiles.php?group_id=172848'

# sequence of mandatory local data objects (validation cannot run without all)
Mandatory_Data_Objects = ('data.tgz',)

# sequence of optional local data objects (at least one must be present).
# these names must be of the form <scene>.sww.<type>.tgz
# as code below depends upon it (see the rsplit() calls in the mainline).
Optional_Data_Objects = ('patong.sww.TRIAL.tgz',
                         'patong.sww.BASIC.tgz',
                         'patong.sww.FINAL.tgz'
                        )

# path to the local data directory
Local_Data_Directory = 'local_data'

# path to the remote data directory (scratch copy used for digest comparison)
Remote_Data_Directory = 'remote_data'

# name of stdout catch file for runmodel.py
RUNMODEL_STDOUT = 'runmodel.stdout'

# text at start of 'output dir' line in RUNMODEL_STDOUT file
OUTDIR_PREFIX = 'Make directory '

# Name of SWW file produced by run_model.py
OUTPUT_SWW = 'patong.sww'
63
def setup():
    '''Get ready for the validation run.

    Placeholder: intended to check that the required data is set
    in project.py.  Currently does nothing.
    '''

    pass
71
72
def refresh_local_data(data_objects, target_dir, mirrors):
    '''Update local data objects from the server.

    data_objects:   list of files to refresh
    target_dir:     directory in which to put files
    mirrors:        list of mirror sites to use

    Each file has an associated *.digest file used to decide
    if the local file needs refreshing.

    Return True if all went well, else False.

    NOTE(review): 'target_dir' is never used; the module-level
    Local_Data_Directory is used instead -- confirm intent.
    '''

    # decision function to decide if a file contains HTML
    def is_html(filename):
        '''Decide if given file contains HTML.

        A mirror that can't serve a file typically returns an HTML error
        page instead; the first 1K bytes are enough to detect that.
        '''

        fd = open(filename)
        try:
            data = fd.read(1024)
        finally:
            fd.close()

        return 'DOCTYPE' in data

    # local function to get remote file from one of mirrors
    def get_remote_from_mirrors(remote, local, auth, mirrors):
        '''Get 'remote' from one of 'mirrors', put in 'local'.

        Return (True, <updated_auth>) on success, else (False, <updated_auth>).
        '''

        # Get a unique date+time string to defeat caching.  The idea is to add
        # this to the end of any URL so proxy sees a different request.
        cache_defeat = '?' + time.strftime('%Y%m%d%H%M%S')

        # try each mirror when getting file
        for mirror in mirrors:
            log.debug('Fetching remote file %s from mirror %s'
                      % (remote, mirror))

            remote_url = mirror + remote + cache_defeat
            (result, auth) = get_web_file(remote_url, local, auth=auth)
            # a 'successful' fetch of an HTML page is really a mirror error
            if result and not is_html(local):
                log.debug('Success fetching file %s' % remote)
                return (True, auth)
            log.debug('Failure fetching from %s' % mirror)

        log.debug('Failure fetching file %s' % remote)
        return (False, auth)

    # local function to compare contents of two files
    def files_same(file_a, file_b):
        '''Compare two files to see if contents are the same.'''

        fd = open(file_a, 'r')
        try:
            data_a = fd.read()
        finally:
            fd.close()

        fd = open(file_b, 'r')
        try:
            data_b = fd.read()
        finally:
            fd.close()

        return data_a == data_b

    # local function to update one data object
    def refresh_object(obj, auth, mirrors):
        '''Update object 'obj' using authentication tuple 'auth'.

        Return (True, <updated_auth>) if all went well,
        else (False, <updated_auth>).
        '''

        # create local and remote file paths.
        obj_digest = obj + '.digest'

        remote_file = os.path.join(Remote_Data_Directory, obj)
        remote_digest = remote_file + '.digest'

        local_file = os.path.join(Local_Data_Directory, obj)
        local_digest = local_file + '.digest'

        # see if missing either digest or object .tgz
        if not os.path.exists(local_digest) or not os.path.exists(local_file):
            # no digest or no object, download both digest and object
            (res, auth) = get_remote_from_mirrors(obj_digest, local_digest,
                                                  auth, mirrors)
            if res:
                (res, auth) = get_remote_from_mirrors(obj, local_file,
                                                      auth, mirrors)
        else:
            # download object digest to remote data directory
            (res, auth) = get_remote_from_mirrors(obj_digest, remote_digest,
                                                  auth, mirrors)
            if res:
                if not files_same(local_digest, remote_digest):
                    # digests differ, refresh object
                    shutil.move(remote_digest, local_digest)
                    (res, auth) = get_remote_from_mirrors(obj, local_file,
                                                          auth, mirrors)

        return (res, auth)

    # create local data directory if required
    log.debug('Creating local directory: %s' % Local_Data_Directory)
    if not os.path.exists(Local_Data_Directory):
        os.mkdir(Local_Data_Directory)

    # clean out remote data copy directory
    log.debug('Cleaning remote directory: %s' % Remote_Data_Directory)
    shutil.rmtree(Remote_Data_Directory, ignore_errors=True)
    os.mkdir(Remote_Data_Directory)

    # success, refresh local files
    auth = None
    result = True
    for data_object in data_objects:
        log.info("Refreshing file '%s'" % data_object)
        log.debug('refresh_local_data: getting %s from mirrors, auth=%s'
                  % (data_object, str(auth)))
        (res, auth) = refresh_object(data_object, auth, mirrors)
        log.debug('refresh_local_data: returned (res,auth)=%s,%s'
                  % (str(res), str(auth)))
        if not res:
            log.info('Refresh of file %s failed.' % data_object)
            result = False

    if result:
        log.info('Local data has been refreshed.')
    else:
        log.info('Local data has been refreshed, with one or more errors.')
    return result
202
203
def can_we_run():
    '''Decide if we can run with the files we have.

    Return True if we *can* run, else False.

    Tell user what is happening first, then untar files.
    '''

    log.critical('Checking if you have the required files to run:')

    # get max width of object name string, used to align the report below
    all_objects = tuple(Mandatory_Data_Objects) + tuple(Optional_Data_Objects)
    max_width = max([len(obj) for obj in all_objects] + [0])

    # if we don't have *all* mandatory objects, can't run
    have_mandatory_files = True
    for obj in Mandatory_Data_Objects:
        obj_path = os.path.join(Local_Data_Directory, obj)
        if os.path.exists(obj_path):
            log.info('\t%s  found' % obj.ljust(max_width))
        else:
            log.info('\t%s  MISSING AND REQUIRED!' % obj.ljust(max_width))
            have_mandatory_files = False

    # at least *one* of the optional objects must exist
    have_optional_files = False
    for obj in Optional_Data_Objects:
        obj_path = os.path.join(Local_Data_Directory, obj)
        if os.path.exists(obj_path):
            have_optional_files = True
            log.info('\t%s  found' % obj.ljust(max_width))
        else:
            log.info('\t%s  MISSING!' % obj.ljust(max_width))

    if not have_mandatory_files or not have_optional_files:
        log.critical('You must obtain the missing files before you can run '
                     'this validation.')
        return False

    log.critical('You have the required files.')

    return True
249
250
def set_environment():
    '''Point the environment at the local data directory.

    Sets INUNDATIONHOME and MUXHOME so subsequent simulation runs
    use the local data.
    '''

    # modify environment so we use the local data
    os.environ['INUNDATIONHOME'] = os.path.join(Local_Data_Directory, '')
    os.environ['MUXHOME'] = os.path.join(Local_Data_Directory, 'data')
257
258
def run_simulation(vtype, sim_obj):
    '''Run a simulation.

    vtype:    validation type string used to instantiate project.py.template
    sim_obj:  name of the data tarfile for this simulation

    Returns True if all went well, else False.
    '''

    # untar the object
    tar_path = os.path.join(Local_Data_Directory, sim_obj)
    log.info('Untarring %s in directory %s ...'
             % (tar_path, Local_Data_Directory))
    untar_file(tar_path, target_dir=Local_Data_Directory)

    # modify project.py template
    log.debug("Creating '%s' version of project.py" % vtype)
    fd = open('project.py.template', 'r')
    try:
        template_lines = fd.readlines()
    finally:
        fd.close()

    new_project = [line.replace('#!SETUP!#', vtype.lower())
                   for line in template_lines]

    fd = open('project.py', 'w')
    try:
        fd.write(''.join(new_project))
    finally:
        fd.close()

    # import new project.py.  Force a *fresh* import: a previous call to
    # this function leaves 'project' cached in sys.modules (plain 'del'
    # doesn't remove it), so without this the stale first version would be
    # silently reused.
    sys.modules.pop('project', None)
    import project

    # run the simulation, produce SWW file
    log.debug('Running run_model.py')
    cmd = 'python run_model.py > %s' % RUNMODEL_STDOUT
    log.debug("run_simulation: doing '%s'" % cmd)
    res = os.system(cmd)
    log.debug("run_simulation: res=%d" % res)

    # 'unimport' project.py - drop both the local name and the module cache
    del project
    sys.modules.pop('project', None)

    # check result
    if res != 0:
        log.critical('Simulation failed, check log')

    return res == 0
303
def check_that_output_is_as_expected(expected_sww, valid_sww):
    '''Check that validation output is as required.

    expected_sww:  name of the SWW file the simulation should have produced
    valid_sww:     name of the known-good SWW file to compare against

    Returns 1 on gross failure (can't locate the output), else None.
    Comparison results are reported through the log.
    '''

    # get path to expected SWW file
    log.critical('Checking that simulation results are as expected ...')
    local_sww = os.path.join(Local_Data_Directory, valid_sww)

    # get output directory from stdout capture file
    try:
        fd = open(RUNMODEL_STDOUT, 'r')
    except IOError as e:
        log.critical("Can't open catch file '%s': %s"
                     % (RUNMODEL_STDOUT, str(e)))
        return 1
    try:
        lines = fd.readlines()
    finally:
        # BUG FIX: was 'fd.close' (a bare method reference, never called),
        # which leaked the file handle.
        fd.close()

    output_directory = None
    for line in lines:
        if line.startswith(OUTDIR_PREFIX):
            output_directory = line.replace(OUTDIR_PREFIX, '', 1)
            output_directory = output_directory.strip('\n')
            break
    if output_directory is None:
        log.critical("Couldn't find line starting with '%s' in file '%s'"
                     % (OUTDIR_PREFIX, RUNMODEL_STDOUT))
        return 1

    log.debug('check_that_output_is_as_expected: output_directory=%s'
              % output_directory)

    # compare SWW files here and there
    new_output_sww = os.path.join(output_directory, expected_sww)
    cmd = 'python cmpsww.py %s %s > cmpsww.stdout' % (local_sww, new_output_sww)
    log.debug("check_that_output_is_as_expected: doing '%s'" % cmd)
    res = os.system(cmd)
    log.debug("check_that_output_is_as_expected: res=%d" % res)
    log.critical()
    if res == 0:
        log.info('Simulation results are as expected.')
    else:
        log.critical('Simulation results are NOT as expected.')
        fd = open('cmpsww.stdout', 'r')
        try:
            cmp_error = fd.readlines()
        finally:
            fd.close()
        log.critical(''.join(cmp_error))
350
351
def teardown():
    '''Remove scratch files and directories left by the validation run.'''

    log.debug('teardown: called')

    # blow away the remote data copy directory
    shutil.rmtree(Remote_Data_Directory, ignore_errors=True)

    # delete the stdout capture file; it may legitimately not exist
    try:
        os.remove(RUNMODEL_STDOUT)
    except OSError:
        pass
363           
364
################################################################################
# Mainline - run the simulation, check output.
################################################################################

# set logging levels
log.console_logging_level = log.INFO
log.log_logging_level = log.DEBUG
log_filename = log.log_filename

setup()

# prepare user for what is about to happen

msg = '''
This validation requires a working internet connection to refresh its files.
You may still run this validation without an internet connection if you have the
required files.

If you are behind a proxy server you will need to supply your proxy details
such as the proxy server address and your proxy username and password.  These
can be defined in one or more of the environment variables:
    HTTP_PROXY
    PROXY_USERNAME
    PROXY_PASSWORD
if you wish.  If not supplied in environment variables you will be prompted for
the information.
'''

log.critical(msg)

# make sure local data is up to date
all_objects = Mandatory_Data_Objects + Optional_Data_Objects
if not refresh_local_data(all_objects, Local_Data_Directory, MIRRORS):
    if not can_we_run():
        log.critical("Can't refresh via the internet and you don't have the "
                     "required files.")
        log.critical('Terminating the validation.')
        log.critical('')
        log.critical('If you get the missing files from %s' % DATA_FILES_URL)
        log.critical('then you can try to run the validation again.  Put the '
                     'files into the directory')
        log.critical("%s." % Local_Data_Directory)
        sys.exit(10)

# now untar mandatory objects
for obj in Mandatory_Data_Objects:
    tar_path = os.path.join(Local_Data_Directory, obj)
    log.info('Untarring %s in directory %s ...'
             % (tar_path, Local_Data_Directory))
    untar_file(tar_path, target_dir=Local_Data_Directory)

# set required environment variables
set_environment()

# now run what simulations we can and check output is as expected
for odo in Optional_Data_Objects:
    start_time = time.time()

    # object names are <scene>.sww.<type>.tgz; extract the <type> part
    (_, vtype, _) = odo.rsplit('.', 2)
    vtype = vtype.lower()
    log.critical('#' * 72)
    log.critical("Running Patong '%s' validation ..." % vtype)
    if run_simulation(vtype, odo):
        # get SWW names expected and valid, check 'equal'
        (valid_sww, _) = odo.rsplit('.', 1)
        (expected_sww, _) = valid_sww.rsplit('.', 1)
        check_that_output_is_as_expected(expected_sww, valid_sww)

    # save this validation's log under a per-type name
    shutil.move(log_filename, '%s.%s' % (log_filename, vtype))
    stop_time = time.time()
    # BUG FIX: the format string has two placeholders but only one value
    # (a bare float, not a tuple) was supplied, which raised TypeError.
    log.critical('%s validation took %.1fs' % (vtype, stop_time - start_time))

# clean up
teardown()
Note: See TracBrowser for help on using the repository browser.