source: anuga_work/development/friction_UA_flume_2006/calc_norm.py @ 4351

Last change on this file since 4351 was 4351, checked in by duncan, 18 years ago

checking in a scenario

File size: 5.1 KB
1"""
2Idea's
3have it use cache!
4"""
5

#----------------------------------------------------------------------------
# Import necessary modules
#----------------------------------------------------------------------------

# Standard modules
import os
from os import sep, path
import csv

# Related major packages
import project
from anuga.abstract_2d_finite_volumes.util import file_function
from numerical_tools import err   # norm, corr,


def load_csv(file_name):
    """
    Load the csv into a dict, with each column title as key and that
    column's values (as strings) as value.
    Also create a dict mapping each title to its column index,
    to keep track of the column order.
    """
    attribute_dic = {}
    title_index_dic = {}
    titles_stripped = [] # list of titles

    reader = csv.reader(open(file_name))

    # Read in and strip the title info
    titles = reader.next()
    for i, title in enumerate(titles):
        titles_stripped.append(title.strip())
        title_index_dic[title.strip()] = i
    title_count = len(titles_stripped)
    #print "title_index_dic",title_index_dic

    # Create a dict of column values, indexed by column title
    for line in reader:
        if len(line) != title_count:
            raise IOError('Row length does not match the number of titles')
        for i, value in enumerate(line):
            attribute_dic.setdefault(titles_stripped[i], []).append(value)

    return attribute_dic, title_index_dic
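
# A minimal sketch of what load_csv returns, for a hypothetical flume csv with
# a 'time' column and one gauge column titled '5.1' (titles and values below
# are made up for illustration):
#
#   time,5.1
#   0.0,0.000
#   0.1,0.012
#
#   attribute_dic   == {'time': ['0.0', '0.1'], '5.1': ['0.000', '0.012']}
#   title_index_dic == {'time': 0, '5.1': 1}
#
# All values are kept as strings; callers convert with float() as needed.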


def write_norm_results(file_name, norms):
    fd = open(file_name, 'w')
    fd.write('sww_file, friction, 2nd norm' + '\n')
    for norm in norms:
        fd.write(norm[0] + ', ' + str(norm[1]) + ', ' + str(norm[2]) + '\n')
    fd.close()
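
# write_norm_results produces a small csv-style summary. As an illustration,
# given norms == [['ff0.01.sww', '0.01', 1.23]] (hypothetical file name and
# values), the file would contain:
#
#   sww_file, friction, 2nd norm
#   ff0.01.sww, 0.01, 1.23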

def calc_norm(flume_name,
              is_trial_run=False,
              outputdir_name=None,
              pro_instance=None,
              max_time=None):
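    """
    Compare the simulated depths from each .sww file in the project's
    output directory against the flume gauge data in flume_name (a csv
    under dam_UA_data_2006), using the relative 2nd norm at each gauge
    location, and write the summed norms per sww file to a
    '<flume_name>_norm' csv in the output directory.

    If max_time is given, flume data after that time is ignored.
    """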
    if is_trial_run:
        outputdir_name += '_test'
    if pro_instance is None:
        pro_instance = project.Project(['data','flumes','dam_2007'],
                                       outputdir_name=outputdir_name)


    # Read in the flume data
    flume_results = path.join(pro_instance.home, 'data', 'flumes',
                              'dam_UA_data_2006',
                              flume_name)
    flume_results, _ = load_csv(flume_results)
    #print "flume_results",flume_results #.keys()

    # Truncate the flume data just after max_time, if one is given
    if max_time is not None:
        for i, time in enumerate(flume_results['time']):
            if float(time) > float(max_time):
                break
        for key in flume_results.iterkeys():
            flume_results[key] = flume_results[key][0:i]

    #print "flume_results['time']", flume_results['time']

    # Get a list of the .sww files in the output directory
    files = os.listdir(pro_instance.outputdir)
    files = [x for x in files if x[-4:] == '.sww']
    files.sort()
    print "checking against files: ", files

    quantities = ['stage', 'elevation']
    gauge_locations = [
        [5.1,0.225],  #0.5m from SWL
        [6.6,0.225],  #2m from SWL
        [6.95,0.255], #2.35m from SWL
        [7.6,0.255],  #3m from SWL
        [8.1,0.255],  #3.5m from SWL
        [9.1,0.255]   #4.5m from SWL
        ]

    norm_sums = []
    for sww_file in files:
        #print "sww_file",sww_file
        file_instance = file_function(pro_instance.outputdir + sep + sww_file,
                                      quantities=quantities,
                                      interpolation_points=gauge_locations,
                                      verbose=True,
                                      use_cache=True)
        # file_instance(0.02, point_id=0) #This works
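        # file_instance is callable: file_instance(t, point_id=i) is expected
        # to return the interpolated quantities at gauge location i and time
        # t, in the order requested above, i.e. [stage, elevation] (see the
        # depth calculation below).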
        norm_sum = 0
        for location in range(len(gauge_locations)):
            #print "location",location
            #print " gauge_locations[location]", gauge_locations[location]
            simulated_depths = []
            #print "flume_results['time']",flume_results['time']
            for time in flume_results['time']:
                quantities_slice = file_instance(float(time),
                                                 point_id=location)
                #print "quantities", quantities_slice
                # depth = stage - elevation
                depth = quantities_slice[0] - quantities_slice[1]
                simulated_depths.append(depth)
            #print "simulated_depths",simulated_depths
            # Get the flume depths at this location; the csv column is
            # titled with the gauge's x coordinate
            flume_depths = flume_results[str(gauge_locations[location][0])]
            flume_depths = [float(d) for d in flume_depths]
            # Calc the norm
            #print "map(None, simulated_depths, flume_depths)", \
            #      map(None, simulated_depths, flume_depths)
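            # err() comes from numerical_tools; with n=2 and relative=True it
            # is assumed to return the relative 2-norm of the difference,
            #   sqrt(sum((s_i - f_i)**2)) / sqrt(sum(f_i**2)),
            # where s_i are the simulated and f_i the flume depths.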
            norm = err(simulated_depths,
                       flume_depths, 2, relative=True)  # 2nd norm (rel. RMS)
            norm_sum += norm
        print "norm_sum", norm_sum
        # The friction value is taken from the sww file name (chars [2:-4])
        norm_sums.append([sww_file, sww_file[2:-4], norm_sum])
        # Rewrite the results file after each sww file is processed
        norm_file_name = pro_instance.outputdir + sep + \
                           flume_name[:-4] + '_norm' + flume_name[-4:]
        write_norm_results(norm_file_name, norm_sums)

#-------------------------------------------------------------
if __name__ == "__main__":
    calc_norm(
        'ensemble_average_h_smooth.csv',
        #'ensemble_average_h_rough.csv',
        is_trial_run=False,   # adds _test to outputdir_name
        outputdir_name='friction_UA_new_meshII',
        max_time=None)
