source: anuga_work/development/friction_UA_flume_2006/calc_norm.py @ 4420

Last change on this file since 4420 was 4420, checked in by duncan, 17 years ago

check in current scenario

File size: 5.3 KB
Line 
1"""
2Idea's
3have it use cache!
4"""
5
6
7#----------------------------------------------------------------------------
8# Import necessary modules
9#----------------------------------------------------------------------------
10
11# Standard modules
12import os
13from os import sep, path
14import csv
15
16# Related major packages
17import project             
18from anuga.abstract_2d_finite_volumes.util import file_function
19from numerical_tools import  err   # norm, corr, 
20
21
def load_csv(file_name):
    """
    Load a csv file into two dictionaries.

    Returns (attribute_dic, title_index_dic):
      attribute_dic  - maps each stripped column title to the list of
                       string values in that column, in row order.
      title_index_dic - maps each stripped column title to its column
                        index, preserving the original column order.

    Raises IOError if a data row has a different number of fields than
    the title row.
    """
    attribute_dic = {}
    title_index_dic = {}
    titles_stripped = []  # list of stripped column titles

    # open() instead of the removed file() builtin; close in finally so
    # the handle is released even when a malformed row raises.
    csv_file = open(file_name)
    try:
        reader = csv.reader(csv_file)

        # Read in and manipulate the title info
        titles = next(reader)  # builtin next() works on Python 2.6+ and 3.x
        for i, title in enumerate(titles):
            titles_stripped.append(title.strip())
            title_index_dic[title.strip()] = i
        title_count = len(titles_stripped)

        # Create a dict of column values, indexed by column title
        for line in reader:
            if len(line) != title_count:
                raise IOError('Row has %d fields but title row has %d: %s'
                              % (len(line), title_count, line))
            for i, value in enumerate(line):
                attribute_dic.setdefault(titles_stripped[i], []).append(value)
    finally:
        csv_file.close()

    return attribute_dic, title_index_dic
52
53
def write_norm_results( file_name, norms):
    """
    Write the norm results as a small csv-style text file.

    file_name: path of the output file (overwritten if it exists).
    norms: sequence of [sww_file, friction, norm_sum] triples; the
           last two entries are converted with str().
    """
    lines = ['sww_file, friction, 2nd normal' + '\n']
    for entry in norms:
        lines.append(', '.join([entry[0], str(entry[1]), str(entry[2])]) + '\n')
    out = open(file_name, 'w')
    out.writelines(lines)
    out.close()
60   
def calc_norm(flume_name,
              is_trial_run=False,
              outputdir_name=None,
              pro_instance=None,
              max_time=None):
    """
    Compare simulated gauge depths from every .sww file in the project's
    output directory against measured flume data, and write per-file
    2nd-norm sums to a '<flume_name stem>_norm.<ext>' file alongside
    the sww files.

    flume_name:      name of the measured-data csv file under
                     <home>/data/flumes/dam_UA_data_2006/; also used to
                     build the output file name.
    is_trial_run:    if True, '_test' is appended to outputdir_name.
    outputdir_name:  name of the directory holding the .sww files; only
                     used when pro_instance is None.
    pro_instance:    a project.Project; created from outputdir_name when
                     not supplied.
    max_time:        if given, flume data rows are truncated at the first
                     time value exceeding it before comparison.
    """
    if is_trial_run is True:
        # NOTE(review): this += fails with a TypeError when
        # outputdir_name is None — callers must supply it for trial runs.
        outputdir_name += '_test'
    if pro_instance is None:
        pro_instance = project.Project(['data','flumes','dam_2007'],
                                       outputdir_name=outputdir_name)
   
   
    #  Read in the flume data
    flume_results = path.join(pro_instance.home, 'data', 'flumes',
                              'dam_UA_data_2006',
                              flume_name)
    flume_results, _ = load_csv(flume_results)
    #print "flume_results",flume_results #.keys()
    if max_time is not None:
        # Find the index of the first time past max_time, then truncate
        # every column to that length.
        # NOTE(review): if no time exceeds max_time the loop never breaks
        # and i is the last index, so the final row is silently dropped;
        # an empty 'time' column leaves i undefined — confirm inputs.
        for i,time in enumerate(flume_results['time']):
            if float(time) > float(max_time):
                break
        for keys in flume_results.iterkeys():
            flume_results[keys] = flume_results[keys][0:i]
   
    #print "flume_results['time']", flume_results['time']
    #Get a list of the files
    files = os.listdir(pro_instance.outputdir)
    files = [x for x in files if x[-4:] == '.sww']
    files.sort()
    print "checking against files: ",files

    quantities = ['stage', 'elevation']
    # Gauge (x, y) positions in the flume; x matches a column title in
    # the measured-data csv (see flume_depths lookup below).
    gauge_locations = [
        [5.1,0.225],  #0.5m from SWL
        [6.6,0.225],  #2m from SWL
        [6.95,0.255], #2.35m from SWL
        [7.6,0.255],  #3m from SWL
        [8.1,0.255],  #3.5m from SWL
        [9.1,0.255]  #4.5m from SWL
        ]

    norm_sums = []
    for sww_file in files:
        #print "sww_file",sww_file
        # file_function interpolates the sww quantities at the gauge
        # points; it is then evaluated at each measured time below.
        file_instance = file_function(pro_instance.outputdir + sep + sww_file,
                                      quantities = quantities,
                                      interpolation_points = gauge_locations,
                                      verbose = True,
                                      use_cache = False)
        # file_instance(0.02, point_id=0) #This works
        norm_sum = 0
        for location in range(len(gauge_locations)):
            #print "location",location
            #print " gauge_locations[location]", gauge_locations[location]
            simulated_depths = []
            #print "flume_results['time']",flume_results['time']
            for time in flume_results['time']:
                quantities_slice = file_instance(float(time),
                                                 point_id=location)
                # depth = stage - elevation (order set by quantities above)
                #print "quantities", quantities_slice
                depth = quantities_slice[0] - quantities_slice[1]
                simulated_depths.append(depth)
            #print "simulated_depths",simulated_depths
            # get the flume depth at this location
            # (column title is the gauge's x coordinate as a string)
            flume_depths = flume_results[str(gauge_locations[location][0])]
            flume_depths = [float(i) for i in flume_depths]
            # calc the norm
            #print "map(None, simulated_depths, flume_depths)", \
            #      map(None, simulated_depths, flume_depths)
            norm = err(simulated_depths,
                       flume_depths, 2, relative = True)  # 2nd norm (rel. RMS)
            norm_sum += norm
        print "norm_sum",norm_sum
        # sww_file[2:-4] strips the leading two characters and the '.sww'
        # extension — presumably leaving the friction value; TODO confirm
        # against the sww naming convention.
        norm_sums.append([sww_file, sww_file[2:-4], norm_sum])
        # Rewritten every iteration so partial results survive a crash.
        norm_file_name = pro_instance.outputdir + sep + \
                           flume_name[:-4]+'_norm' + flume_name[-4:]
        write_norm_results(norm_file_name, norm_sums)
143
144#-------------------------------------------------------------
if __name__ == "__main__":
    # Compare the rough-flume ensemble data against every .sww file in
    # the named output directory, with no time cutoff.  Swap in the
    # commented-out csv name to check the smooth-flume data instead.
    calc_norm(
        #'ensemble_average_h_smooth.csv',
        'ensemble_average_h_rough.csv',
        is_trial_run = False,   # adds _test to outputdir_name
        outputdir_name='UA_flume_built_from_scratch_0.06_stage',
        max_time=None)
152
Note: See TracBrowser for help on using the repository browser.