source: anuga_work/production/dampier_2006/make_report_cipma.py @ 3959

Last change on this file since 3959 was 3959, checked in by sexton, 17 years ago

updates for Dampier

File size: 9.3 KB
1"""
2Generate report for production run
3
4NOTE - this will only work on Windows system (as pylab not installed on unix).
5
6Inputs:
7
8report_title:    title to be included in tex file
9production dirs: dictionary of production directories with a
10                 association to that simulation run, eg high tide,
11                 low tide and MSL.
12                   
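For example, a single run at mean sea level might be specified as follows
(the run directory name here is hypothetical):

    production_dirs = {'20060601_120000_run': 'MSL'}
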

Outputs:

* Report generated to <scenario>_report_CIPMA.tex, where <scenario> is the
  name of the production directory this script is being run from.
* Figures used for the report are stored in the report_figures directory.
  NOTE: this directory will need to be committed, as well as the latex files.

The report structure is

* Executive Summary
* Introduction
* Modelling Methodology
* Tsunami scenario
* Data sources
* Inundation model
* Inundation modelling results
* Impact modelling
* Summary
* Acknowledgements
* References
* Appendix: Metadata
* Appendix: Time series outputs

Other files included in the document which require manual intervention:

* an abstract must be written in abstract.tex
* an introduction must be written in introduction.tex; a basic outline and
  some of the core inputs are already in place
* the tsunamigenic event should be discussed in tsunami_scenario.tex
* a discussion of the ANUGA model is required in anuga.tex
* a computational_setup.tex file needs to be written for the particular scenario
* the interpretation of the results needs to be written to interpretation.tex
* maximum inundation map names need to be included in HAT_map and LAT_map etc.
* damage modelling map names need to be included in HAT_damage and LAT_damage etc.
* a summary must be written into summary.tex
* metadata for the scenario data to be included in metadata.tex

May, June 2006
"""

from os import getcwd, sep, altsep, mkdir, access, F_OK
import project
from anuga.abstract_2d_finite_volumes.util import sww2timeseries, get_gauges_from_file

# Derive scenario name
p = getcwd().split(sep)
scenario = p[-1] # Last element of absolute CWD path
scenario_name = scenario.split('_2006')[0] # Strip any text past `_2006`
test = scenario_name.split('_')
if len(test) != 1:
    scenario_name = '%s %s' %(test[0], test[1])
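# For example, running this script from .../production/dampier_2006 gives
# scenario 'dampier_2006' and scenario_name 'dampier'.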

# Create report directory
reportdir = getcwd()+sep+'report'+sep
if access(reportdir,F_OK) == 0:
    mkdir(reportdir)

# User defined inputs
report_title = 'Tsunami impact modelling for %s' %scenario_name.title()

# WA DLI data
production_dirs = {#'20061101_053132_run': 'big event'} # parallel
                    #'20061101_054432_run': 'big event'} # sequential
                    '20061107_070805_run': 'Mw 9.0'}

is_parallel = True
if is_parallel:
    nodes = 8

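# max_maps associates each scenario label with the name of the tex file
# (referenced via \input below, without the .tex extension) containing its
# maximum inundation map.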
max_maps = {'Mw 9.0': 'mw9_map'}

gauge_map = 'cipma_gauges.jpg'
gauge_filename = 'test.xya'
# Create sections and graphs for each designated production directory
latex_output = []
report_name = 'latexoutput_cipma'

if is_parallel:

    for i in range(nodes):
        print 'Sending node %d of %d' %(i,nodes)
        swwfiles = {}
        reportname = report_name + '_%s' %(i)
        for label_id in production_dirs.keys():
            file_loc = project.output_dir + label_id + sep
            sww_extra = '_P%s_%s' %(i,nodes)
            swwfile = file_loc + project.scenario_name + sww_extra + '.sww'
            swwfiles[swwfile] = label_id

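            # In a parallel run there is one sww file per processor,
            # named <scenario_name>_P<i>_<nodes>.sww, and a separate
            # time series report (latexoutput_cipma_<i>) is built from each.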
            texname, elev_output = sww2timeseries(swwfiles,
                                                  project.gauge_filename,
                                                  production_dirs,
                                                  report = True,
                                                  reportname = reportname,
                                                  plot_quantity = ['stage', 'speed'],
                                                  surface = False,
                                                  time_min = None,
                                                  time_max = None,
                                                  title_on = False,
                                                  verbose = True)

            latex_output.append(texname)

else:

    swwfiles = {}
    for label_id in production_dirs.keys():

        file_loc = project.output_dir + label_id + sep
        swwfile = file_loc + project.scenario_name + '.sww'
        swwfiles[swwfile] = label_id

    texname, elev_output = sww2timeseries(swwfiles,
                                          project.gauge_filename,
                                          production_dirs,
                                          report = True,
                                          reportname = report_name,
                                          plot_quantity = ['stage', 'speed'],
                                          surface = False,
                                          time_min = None,
                                          time_max = None,
                                          title_on = False,
                                          verbose = True)

    latex_output.append(texname)

# Start report generation
# Future: generate_report(reportdir, scenario, report_title,
# project.gauge_filename, max_maps, damage_maps, production_dirs, latex_output)
report_name = reportdir + scenario + '_report_CIPMA.tex'
fid = open(report_name, 'w')

s = """
% This file is automatically generated by make_report_cipma.py.
%
% Manual parts are:
% * an abstract must be written in abstract.tex
% * an introduction must be written in introduction.tex; a basic outline and
%   some of the core inputs are already in place
% * outline of the modelling methodology provided in modelling_methodology.tex
% * the tsunamigenic event should be discussed in tsunami_scenario.tex
% * a computational_setup.tex file needs to be written for the particular scenario
% * the interpretation of the results needs to be written to interpretation.tex
% * maximum inundation maps need to be included in HAT_map.tex and LAT_map.tex etc.
% * damage modelling maps need to be included in HAT_damage and LAT_damage etc.
% * a summary must be written into summary.tex
% * metadata for the scenario data to be included in metadata.tex

\documentclass{article}

\usepackage{ae} % or {zefonts}
\usepackage[T1]{fontenc}
\usepackage[ansinew]{inputenc}
\usepackage{amsmath}
\usepackage{amssymb}
\usepackage{graphicx}
\usepackage{color}
\usepackage[colorlinks]{hyperref}
\usepackage{lscape} % landscape page support
\usepackage{setspace}
\usepackage{rotating}
\usepackage{pdfpages}
\include{appendix}
\setstretch{1.25}
\\topmargin 0pt
\oddsidemargin 0pt
\evensidemargin 0pt
\marginparwidth 0.5pt
\\textwidth \paperwidth
\\advance\\textwidth -2in

"""
fid.write(s)

s = """
\date{\\today}
%\\author{Geoscience Australia}

\\begin{document}
\\title{
\\begin{figure}[hbt]
  \centerline{ \includegraphics[scale=0.4]{../report_figures/GAlogo.jpg}}
\end{figure}
"""
fid.write(s)
s = '%s} '%report_title
fid.write(s)
s = """
  \maketitle

    \section{Executive Summary}
    \label{sec:execsum}
  \input{execsum}

  \\tableofcontents

   \section{Modelling methodology}
    \label{sec:methodology}
    \input{modelling_methodology}

  \section{Data sources}
    \label{sec:data}
    \input{data}

   \section{Inundation model}
    \label{sec:anuga}
    \input{anuga}
    \input{computational_setup}

  \section{Inundation modelling results}
     \label{sec:results}

"""
fid.write(s)

# Generate latex output for location points
s = '\\begin{table} \\begin{center} \n'
fid.write(s)
s = '\caption{Defined point locations for %s study area.}' %scenario_name
fid.write(s)
s = """
\label{table:locations}
\\begin{tabular}{|l|l|l|l|}\hline
\\bf{Point Name} & \\bf{Easting} & \\bf{Northing} & \\bf{Elevation}\\\\ \hline
"""
fid.write(s)

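# get_gauges_from_file returns, for each point in the gauge file, its
# (easting, northing) coordinates, its name and its elevation; these
# populate the point-location table.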
gauges, locations, elevation = get_gauges_from_file(project.gauge_filename)

for name, gauge, elev in zip(locations, gauges, elevation):
    east = gauge[0]
    north = gauge[1]
    s = '%s & %.2f & %.2f & %.2f \\\\ \hline \n' %(name.replace('_',' '), east, north, elev)
    fid.write(s)

s = '\\end{tabular} \n  \end{center} \n \end{table} \n \n'
fid.write(s)

#s = '\\begin{figure}[hbt] \n \centerline{ \includegraphics[width=\paperwidth]{../report_figures/%s}}' %gauge_map
#fid.write(s)

#s  = """
#\caption{Point locations used for Dampier study.}
#\label{fig:points}
#\end{figure}
#"""
#fid.write(s)

#s = '\input{interpretation} \n'
#fid.write(s)

# Assign titles to each production section
# Must specify one name per section
for name in production_dirs.keys():

    s = '\input{%s} \n \clearpage \n \n' %max_maps[production_dirs[name]]
    fid.write(s)

# Closing

s = """

     \section{Summary}
     \label{sec:summary}
     \input{summary}

     %\section{Acknowledgements}
     %\input{acknowledgements}

    \input{references}

    \\appendix

   \section{Metadata}
     \label{sec:metadata}
     \input{metadata}

\clearpage

   \section{Time series}
     \label{sec:timeseries}
"""
fid.write(s)

for texfile in latex_output:
    if texfile != '':
        s = '\input{%s} \n \clearpage \n \n' %texfile
        fid.write(s)

s = """
\end{document}
"""
fid.write(s)
fid.close()
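
# The generated tex file can then be compiled from the report directory
# (typically with pdflatex, given the jpg figures and pdfpages usage), once
# the manually written tex fragments listed in the module docstring exist.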