1 | """Class Geospatial_data - Manipulation of locations on the planet and |
---|
2 | associated attributes. |
---|
3 | |
---|
4 | """ |
---|
5 | |
---|
6 | from sys import maxint |
---|
7 | from os import access, F_OK, R_OK,remove |
---|
8 | from types import DictType |
---|
9 | from warnings import warn |
---|
10 | from string import lower |
---|
11 | from RandomArray import randint, seed, get_seed |
---|
12 | from copy import deepcopy |
---|
13 | from Scientific.IO.NetCDF import NetCDFFile |
---|
14 | |
---|
15 | import Numeric as num |
---|
16 | |
---|
17 | from anuga.coordinate_transforms.lat_long_UTM_conversion import UTMtoLL |
---|
18 | from anuga.utilities.numerical_tools import ensure_numeric |
---|
19 | from anuga.coordinate_transforms.geo_reference import Geo_reference, \ |
---|
20 | TitleError, DEFAULT_ZONE, ensure_geo_reference, write_NetCDF_georeference |
---|
21 | from anuga.coordinate_transforms.redfearn import convert_from_latlon_to_utm |
---|
22 | from anuga.utilities.anuga_exceptions import ANUGAError |
---|
23 | from anuga.config import points_file_block_line_size as MAX_READ_LINES |
---|
24 | from anuga.config import netcdf_mode_r, netcdf_mode_w, netcdf_mode_a |
---|
25 | |
---|
26 | DEFAULT_ATTRIBUTE = 'elevation' |
---|
27 | |
---|
28 | |
---|
##
# @brief Hold a set of data points and their associated attributes.
class Geospatial_data:

    ##
    # @brief Create instance from data points and associated attributes.
    # @param data_points Mx2 iterable of tuples or array of x,y coordinates.
    # @param attributes Associated values for each data point.
    # @param geo_reference Object representing the origin of the data points.
    # @param default_attribute_name Name of default attribute to return.
    # @param file_name Name of input file (.txt, .csv or .pts).
    # @param latitudes Vector of latitudes, used instead of data_points.
    # @param longitudes Vector of longitudes, used instead of data_points.
    # @param points_are_lats_longs True if points are lat/long, not UTM.
    # @param max_read_lines Size of block to read, if blocking.
    # @param load_file_now True if blocking but we want to read file now.
    # @param verbose True if this class instance is verbose.
    def __init__(self,
                 data_points=None, # this can also be a points file name
                 attributes=None,
                 geo_reference=None,
                 default_attribute_name=None,
                 file_name=None,
                 latitudes=None,
                 longitudes=None,
                 points_are_lats_longs=False,
                 max_read_lines=None,
                 load_file_now=True,
                 verbose=False):
58 | """ |
---|
59 | Create instance from data points and associated attributes |
---|
60 | |
---|
61 | data_points: x,y coordinates in meters. Type must be either a |
---|
62 | sequence of 2-tuples or an Mx2 Numeric array of floats. A file name |
---|
63 | with extension .txt, .cvs or .pts can also be passed in here. |
---|
64 | |
---|
65 | attributes: Associated values for each data point. The type |
---|
66 | must be either a list or an array of length M or a dictionary |
---|
67 | of lists (or arrays) of length M. In the latter case the keys |
---|
68 | in the dictionary represent the attribute names, in the former |
---|
69 | the attribute will get the default name "elevation". |
---|
70 | |
---|
71 | geo_reference: Object representing the origin of the data |
---|
72 | points. It contains UTM zone, easting and northing and data |
---|
73 | points are assumed to be relative to this origin. |
---|
74 | If geo_reference is None, the default geo ref object is used. |
---|
75 | |
---|
76 | default_attribute_name: Name of default attribute to be used with |
---|
77 | get_attribute_values. The idea is that the dataset can be |
---|
78 | equipped with information about which attribute to return. |
---|
79 | If None, the default is the "first" |
---|
80 | |
---|
81 | latitudes, longitudes: Vectors of latitudes and longitudes, |
---|
82 | used to specify location instead of points. |
---|
83 | |
---|
84 | points_are_lats_longs: Set this as true if the points are actually |
---|
85 | lats and longs, not UTM |
---|
86 | |
---|
87 | max_read_lines: The number of rows read into memory when using |
---|
88 | blocking to read a file. |
---|
89 | |
---|
90 | load_file_now: If true the file is automatically loaded |
---|
91 | into the geospatial instance. Used when blocking. |
---|
92 | |
---|
93 | file_name: Name of input netCDF file or .txt file. netCDF file must |
---|
94 | have dimensions "points" etc. |
---|
95 | .txt file is a comma seperated file with x, y and attribute |
---|
96 | data. |
---|
97 | |
---|
98 | The first line has the titles of the columns. The first two |
---|
99 | column titles are checked to see if they start with lat or |
---|
100 | long (not case sensitive). If so the data is assumed to be |
---|
101 | latitude and longitude, in decimal format and converted to |
---|
102 | UTM. Otherwise the first two columns are assumed to be the x |
---|
103 | and y, and the title names acually used are ignored. |
---|
104 | |
---|
105 | |
---|
106 | The format for a .txt file is: |
---|
107 | 1st line: [column names] |
---|
108 | other lines: x y [attributes] |
---|
109 | |
---|
110 | for example: |
---|
111 | x, y, elevation, friction |
---|
112 | 0.6, 0.7, 4.9, 0.3 |
---|
113 | 1.9, 2.8, 5, 0.3 |
---|
114 | 2.7, 2.4, 5.2, 0.3 |
---|
115 | |
---|
116 | The first two columns have to be x, y or lat, long |
---|
117 | coordinates. |
---|
118 | |
---|
119 | |
---|
120 | The format for a Points dictionary is: |
---|
121 | ['pointlist'] a 2 column array describing points. 1st column x, |
---|
122 | 2nd column y. |
---|
123 | ['attributelist'], a dictionary of 1D arrays, representing |
---|
124 | attribute values at the point. The dictionary key is the attribute |
---|
125 | header. |
---|
126 | ['geo_reference'] a Geo_refernece object. Use if the point |
---|
127 | information is relative. This is optional. |
---|
128 | eg |
---|
129 | dic['pointlist'] = [[1.0,2.0],[3.0,5.0]] |
---|
130 | dic['attributelist']['elevation'] = [[7.0,5.0] |
---|
131 | |
---|
132 | verbose: |
---|
133 | |
---|
134 | """ |
---|

        if isinstance(data_points, basestring):
            # assume data point is really a file name
            file_name = data_points

        self.set_verbose(verbose)
        self.geo_reference = None # create the attribute
        self.file_name = file_name

        if max_read_lines is None:
            self.max_read_lines = MAX_READ_LINES
        else:
            self.max_read_lines = max_read_lines

        if file_name is None:
            if latitudes is not None \
               or longitudes is not None \
               or points_are_lats_longs:
                data_points, geo_reference = \
                    _set_using_lat_long(latitudes=latitudes,
                                        longitudes=longitudes,
                                        geo_reference=geo_reference,
                                        data_points=data_points,
                                        points_are_lats_longs=\
                                        points_are_lats_longs)
            self.check_data_points(data_points)
            self.set_attributes(attributes)
            self.set_geo_reference(geo_reference)
            self.set_default_attribute_name(default_attribute_name)
        elif load_file_now is True:
            # Watch for the case where a file name as well as points,
            # attributes etc are provided: if a file name is given,
            # all other provided info will be overwritten!

            if verbose is True:
                if file_name is not None:
                    print 'Loading Geospatial data from file: %s' % file_name

            self.import_points_file(file_name, verbose=verbose)

            self.check_data_points(self.data_points)
            self.set_attributes(self.attributes)
            self.set_geo_reference(self.geo_reference)
            self.set_default_attribute_name(default_attribute_name)

        if verbose is True:
            if file_name is not None:
                print 'Geospatial data created from file: %s' % file_name
                if load_file_now is False:
                    print 'Data will be loaded blockwise on demand'

                    if file_name.endswith('csv') or file_name.endswith('txt'):
                        pass
                        # This message was misleading.
                        # FIXME (Ole): Are we blocking here or not?
                        #print 'ASCII formats are not that great for '
                        #print 'blockwise reading. Consider storing this'
                        #print 'data as a pts NetCDF format'

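    # A minimal usage sketch of the constructor (hedged: 'points.csv' is a
    # hypothetical file with an 'x, y, elevation' header line):
    #
    #     G1 = Geospatial_data(data_points=[[1.0, 2.0], [3.0, 5.0]],
    #                          attributes=[4.9, 5.2])
    #     G2 = Geospatial_data(file_name='points.csv')
    #     G3 = Geospatial_data('points.csv')  # data_points may be a file name
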
    ##
    # @brief Return length of the points set.
    def __len__(self):
        return len(self.data_points)

    ##
    # @brief Return a string representation of the points set.
    def __repr__(self):
        return str(self.get_data_points(absolute=True))

    ##
    # @brief Save points data in instance.
    # @param data_points Points data to store in instance and check.
    # @note Throws ValueError exception if no data.
    def check_data_points(self, data_points):
        """Checks data points
        """

        if data_points is None:
            self.data_points = None
            msg = 'There is no data or file provided!'
            raise ValueError, msg
        else:
            self.data_points = ensure_numeric(data_points)
            if self.data_points.shape != (0,):
                assert len(self.data_points.shape) == 2
                assert self.data_points.shape[1] == 2

    ##
    # @brief Check and assign attributes data.
    # @param attributes Dictionary or scalar to save as .attributes.
    # @note Throws exception if unable to convert dict keys to numeric.
    def set_attributes(self, attributes):
        """Check and assign attributes dictionary
        """

        if attributes is None:
            self.attributes = None
            return

        if not isinstance(attributes, DictType):
            # Convert single attribute into dictionary
            attributes = {DEFAULT_ATTRIBUTE: attributes}

        # Check input attributes
        for key in attributes.keys():
            try:
                attributes[key] = ensure_numeric(attributes[key])
            except:
                msg = 'Attribute %s could not be converted ' % key
                msg += 'to a numeric vector'
                raise Exception, msg

        self.attributes = attributes

    #def set_geo_reference(self, geo_reference):
    #    # FIXME (Ole): Backwards compatibility - deprecate
    #    self.setgeo_reference(geo_reference)

    ##
    # @brief Set the georeference of geospatial data.
    # @param geo_reference The georeference data to set.
    # @note Will raise exception if param not instance of Geo_reference.
    def set_geo_reference(self, geo_reference):
        """Set the georeference of geospatial data.

        It can also be used to change the georeference and will ensure that
        the absolute coordinate values are unchanged.
        """

        from anuga.coordinate_transforms.geo_reference import Geo_reference

        if geo_reference is None:
            # Use default - points are in absolute coordinates
            geo_reference = Geo_reference()

        # Allow for tuple (zone, xllcorner, yllcorner)
        geo_reference = ensure_geo_reference(geo_reference)

        if not isinstance(geo_reference, Geo_reference):
            # FIXME (Ole): This exception will be raised even
            # if geo_reference is None. Is that the intent Duncan?
            msg = 'Argument geo_reference must be a valid Geo_reference '
            msg += 'object or None.'
            raise Exception, msg

        # If a geo_reference already exists, change the point data according to
        # the new geo reference
        if self.geo_reference is not None:
            self.data_points = self.get_data_points(geo_reference=geo_reference)

        self.geo_reference = geo_reference

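    # A minimal sketch of changing the georeference; the zone and corner
    # values below are illustrative. The absolute coordinates of the data
    # points are preserved across the change:
    #
    #     G.set_geo_reference(Geo_reference(56, 100000.0, 8000000.0))
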
    ##
    # @brief Set default attribute name.
    # @param default_attribute_name The default to save.
    def set_default_attribute_name(self, default_attribute_name):
        self.default_attribute_name = default_attribute_name

    ##
    # @brief Set the instance verbose flag.
    # @param verbose The value to save.
    # @note Will raise exception if param is not True or False.
    def set_verbose(self, verbose=False):
        if verbose in [False, True]:
            self.verbose = verbose
        else:
            msg = 'Illegal value: %s' % str(verbose)
            raise Exception, msg

    ##
    # @brief Clip geospatial data by a given polygon.
    # @param polygon The polygon to clip with.
    # @param closed True if points on the clip boundary are included in result.
    # @param verbose True if this function is verbose.
    def clip(self, polygon, closed=True, verbose=False):
        """Clip geospatial data by a polygon

        Input
          polygon - Either a list of points, an Nx2 array or
                    a Geospatial data object.
          closed - (optional) determine whether points on boundary should be
          regarded as belonging to the polygon (closed = True)
          or not (closed = False). Default is True.

        Output
          New geospatial data object representing points inside
          specified polygon.


        Note - this method is non-destructive and leaves the data in 'self'
        unchanged
        """

        from anuga.utilities.polygon import inside_polygon

        if isinstance(polygon, Geospatial_data):
            # Polygon is an object - extract points
            polygon = polygon.get_data_points()

        points = self.get_data_points()
        inside_indices = inside_polygon(points, polygon, closed, verbose)

        clipped_G = self.get_sample(inside_indices)

        return clipped_G

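    # A minimal clipping sketch (the polygon vertices are illustrative):
    #
    #     polygon = [[0.0, 0.0], [10.0, 0.0], [10.0, 10.0], [0.0, 10.0]]
    #     G_in = G.clip(polygon)             # points inside the polygon
    #     G_out = G.clip_outside(polygon)    # points outside (defined below)
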
    ##
    # @brief Clip points data by polygon, return points outside polygon.
    # @param closed True if points on clip boundary are not included in result.
    # @param polygon The polygon to clip with.
    # @param verbose True if this function is verbose.
    def clip_outside(self, polygon, closed=True, verbose=False):
        """Clip geospatial data by a polygon, keeping data OUTSIDE of polygon

        Input
          polygon - Either a list of points, an Nx2 array or
                    a Geospatial data object.
          closed - (optional) determine whether points on boundary should be
          regarded as belonging to the polygon (closed = True)
          or not (closed = False). Default is True.

        Output
          Geospatial data object representing points OUTSIDE specified polygon
        """

        from anuga.utilities.polygon import outside_polygon

        if isinstance(polygon, Geospatial_data):
            # Polygon is an object - extract points
            polygon = polygon.get_data_points()

        points = self.get_data_points()
        outside_indices = outside_polygon(points, polygon, closed, verbose)

        clipped_G = self.get_sample(outside_indices)

        return clipped_G

    ##
    # @brief Get instance geo_reference data.
    def get_geo_reference(self):
        return self.geo_reference

    ##
    # @brief Get coordinates for all data points as an Nx2 array.
    # @param absolute If True, return UTM, else relative to xll/yll corners.
    # @param geo_reference If supplied, points are relative to it.
    # @param as_lat_long If True, return points as lat/lon.
    # @param isSouthHemisphere If True, return lat/lon points in S.Hemi.
    # @return A set of data points, in appropriate form.
    def get_data_points(self, absolute=True, geo_reference=None,
                        as_lat_long=False, isSouthHemisphere=True):
        """Get coordinates for all data points as an Nx2 array

        If absolute is False returned coordinates are relative to the
        internal georeference's xll and yll corners, otherwise
        absolute UTM coordinates are returned.

        If a geo_reference is passed the points are returned relative
        to that geo_reference.

        isSouthHemisphere is only used when getting data points
        as_lat_long and, if False, will return lats and longs valid
        for the Northern Hemisphere.

        Default: absolute is True.
        """

        if as_lat_long is True:
            msg = "Points need a zone to be converted into lats and longs"
            assert self.geo_reference is not None, msg
            zone = self.geo_reference.get_zone()
            assert self.geo_reference.get_zone() is not DEFAULT_ZONE, msg
            lats_longs = []
            for point in self.get_data_points(True):
                # UTMtoLL(northing, easting, zone, isSouthHemisphere)
                lat_calced, long_calced = UTMtoLL(point[1], point[0],
                                                  zone, isSouthHemisphere)
                lats_longs.append((lat_calced, long_calced)) # to hash
            return lats_longs

        if absolute is True and geo_reference is None:
            return self.geo_reference.get_absolute(self.data_points)
        elif geo_reference is not None:
            return geo_reference.change_points_geo_ref(self.data_points,
                                                       self.geo_reference)
        else:
            # If absolute is False
            return self.data_points

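    # A minimal sketch of the different coordinate views, all derived from
    # the same stored points (assumes the georeference carries a proper UTM
    # zone for the lat/long conversion):
    #
    #     abs_pts = G.get_data_points(absolute=True)    # absolute UTM
    #     rel_pts = G.get_data_points(absolute=False)   # relative to origin
    #     lat_lon = G.get_data_points(as_lat_long=True) # list of (lat, long)
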
    ##
    # @brief Get value for attribute name.
    # @param attribute_name Name to get value for.
    # @note If name passed is None, return default attribute value.
    def get_attributes(self, attribute_name=None):
        """Return values for one named attribute.

        If attribute_name is None, default_attribute_name is used
        """

        if attribute_name is None:
            if self.default_attribute_name is not None:
                attribute_name = self.default_attribute_name
            else:
                attribute_name = self.attributes.keys()[0]
                # above line takes the first one from keys

        if self.verbose is True:
            print 'Using attribute %s' % attribute_name
            print 'Available attributes: %s' % (self.attributes.keys())

        msg = 'Attribute name %s does not exist in data set' % attribute_name
        assert self.attributes.has_key(attribute_name), msg

        return self.attributes[attribute_name]

    ##
    # @brief Get all instance attributes.
    # @return The instance attribute dictionary, or None if no attributes.
    def get_all_attributes(self):
        """Return values for all attributes.
        The return value is either None or a dictionary (possibly empty).
        """

        return self.attributes

    ##
    # @brief Override __add__() to allow addition of geospatial objects.
    # @param self This object.
    # @param other The second object.
    # @return The new geospatial object.
    def __add__(self, other):
        """Returns the addition of 2 geospatial objects,
        objects are concatenated to the end of each other

        NOTE: doesn't add if objects contain different
        attributes

        Always return absolute points!
        This also means, that if you add None to the object,
        it will be turned into absolute coordinates

        other can be None in which case nothing is added to self.
        """

        # find objects zone and checks if the same
        geo_ref1 = self.get_geo_reference()
        zone1 = geo_ref1.get_zone()

        if other is not None:
            geo_ref2 = other.get_geo_reference()
            zone2 = geo_ref2.get_zone()
            geo_ref1.reconcile_zones(geo_ref2)
            new_points = num.concatenate((self.get_data_points(absolute=True),
                                          other.get_data_points(absolute=True)),
                                         axis=0)

            # Concatenate attributes if any
            if self.attributes is None:
                if other.attributes is not None:
                    msg = 'Geospatial data must have the same '
                    msg += 'attributes to allow addition.'
                    raise Exception, msg

                new_attributes = None
            else:
                new_attributes = {}
                for x in self.attributes.keys():
                    if other.attributes.has_key(x):
                        attrib1 = self.attributes[x]
                        attrib2 = other.attributes[x]
                        new_attributes[x] = num.concatenate((attrib1, attrib2))
                    else:
                        msg = 'Geospatial data must have the same \n'
                        msg += 'attributes to allow addition.'
                        raise Exception, msg
        else:
            # other is None:
            new_points = self.get_data_points(absolute=True)
            new_attributes = self.attributes

        # Instantiate new data object and return absolute coordinates
        new_geo_ref = Geo_reference(geo_ref1.get_zone(), 0.0, 0.0)
        return Geospatial_data(new_points, new_attributes, new_geo_ref)

    ##
    # @brief Override the addition case where LHS isn't a geospatial object.
    # @param self This object.
    # @param other The second object.
    # @return The new geospatial object.
    def __radd__(self, other):
        """Handle cases like None + Geospatial_data(...)
        """

        return self + other

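    # A minimal addition sketch; both operands must carry the same attribute
    # keys. Adding None is a no-op that returns the points in absolute
    # coordinates (see the __add__ docstring above):
    #
    #     G = G1 + G2      # points and attributes concatenated
    #     G = None + G1    # handled by __radd__
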
    ###
    # IMPORT/EXPORT POINTS FILES
    ###

    ##
    # @brief Import a .txt, .csv or .pts points data file.
    # @param file_name Path to the file to import.
    # @param delimiter Unused.
    # @param verbose True if this function is to be verbose.
    # @note Will throw IOError or SyntaxError if there is a problem.
    def import_points_file(self, file_name, delimiter=None, verbose=False):
        """Load a .txt, .csv or .pts file

        Note: will throw an IOError/SyntaxError if it can't load the file.
        Catch these!

        Post condition: self.attributes dictionary has been set
        """

        if access(file_name, F_OK) == 0:
            msg = 'File %s does not exist or is not accessible' % file_name
            raise IOError, msg

        attributes = {}
        if file_name[-4:] == ".pts":
            try:
                data_points, attributes, geo_reference = \
                    _read_pts_file(file_name, verbose)
            except IOError, e:
                msg = 'Could not open file %s ' % file_name
                raise IOError, msg
        elif file_name[-4:] == ".txt" or file_name[-4:] == ".csv":
            try:
                data_points, attributes, geo_reference = \
                    _read_csv_file(file_name, verbose)
            except IOError, e:
                # This should only be if a file is not found
                msg = 'Could not open file %s. ' % file_name
                msg += 'Check the file location.'
                raise IOError, msg
            except SyntaxError, e:
                # This should only be if there is a format error
                msg = 'Could not open file %s. \n' % file_name
                msg += Error_message['IOError']
                raise SyntaxError, msg
        else:
            msg = 'Extension %s is unknown' % file_name[-4:]
            raise IOError, msg

        self.data_points = data_points
        self.attributes = attributes
        self.geo_reference = geo_reference

    ##
    # @brief Write points data to a file (.csv, .txt, .pts or .urs).
    # @param file_name Path to file to write.
    # @param absolute If True, write absolute UTM coordinates.
    # @param as_lat_long If True, write points as lat/long.
    # @param isSouthHemisphere If True, lat/long points are in S. hemisphere.
    def export_points_file(self, file_name, absolute=True,
                           as_lat_long=False, isSouthHemisphere=True):
        """
        write a points file, file_name, as a text (.csv) or binary (.pts) file
        file_name is the file name, including the extension
        The point_dict is defined at the top of this file.

        If absolute is True, the xll and yll corners are added to the point
        values and the xll and yll of the geo_reference are set to 0.

        If absolute is False, data points are written relative to the xll
        and yll corners and the geo_reference remains unaffected.

        isSouthHemisphere is only used when writing data points
        as_lat_long and, if False, will write lats and longs valid
        for the Northern Hemisphere.
        """

        if file_name[-4:] == ".pts":
            if absolute is True:
                geo_ref = deepcopy(self.geo_reference)
                geo_ref.xllcorner = 0
                geo_ref.yllcorner = 0
                _write_pts_file(file_name,
                                self.get_data_points(absolute),
                                self.get_all_attributes(),
                                geo_ref)
            else:
                _write_pts_file(file_name,
                                self.get_data_points(absolute),
                                self.get_all_attributes(),
                                self.get_geo_reference())
        elif file_name[-4:] == ".txt" or file_name[-4:] == ".csv":
            msg = "ERROR: trying to write a .txt file with relative data."
            assert absolute, msg
            _write_csv_file(file_name,
                            self.get_data_points(absolute=True,
                                                 as_lat_long=as_lat_long,
                                                 isSouthHemisphere=isSouthHemisphere),
                            self.get_all_attributes(),
                            as_lat_long=as_lat_long)
        elif file_name[-4:] == ".urs":
            msg = "ERROR: Can not write a .urs file as a relative file."
            assert absolute, msg
            _write_urs_file(file_name,
                            self.get_data_points(as_lat_long=True,
                                                 isSouthHemisphere=isSouthHemisphere))
        else:
            msg = 'Unknown file type %s ' % file_name
            raise IOError, msg

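    # A minimal export/import round trip ('data.pts' and 'data.csv' are
    # illustrative file names):
    #
    #     G.export_points_file('data.pts')    # binary NetCDF .pts file
    #     G.export_points_file('data.csv')    # text file with header line
    #     G2 = Geospatial_data(file_name='data.pts')
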
    ##
    # @brief Get a subset of data that is referred to by 'indices'.
    # @param indices A list of indices to select data subset with.
    # @return A geospatial object containing data subset.
    def get_sample(self, indices):
        """Returns an object which is a subset of the original;
        the data points and attributes in the new object refer to
        the indices provided

        Input
          indices - a list of integers that represent the new object
        Output
          New geospatial data object representing points specified by
          the indices
        """

        # FIXME: add the geo_reference to this
        points = self.get_data_points()
        sampled_points = num.take(points, indices)

        attributes = self.get_all_attributes()

        sampled_attributes = {}
        if attributes is not None:
            for key, att in attributes.items():
                sampled_attributes[key] = num.take(att, indices)

        return Geospatial_data(sampled_points, sampled_attributes)

    ##
    # @brief Split one geospatial object into two.
    # @param factor Relative size to make first result object.
    # @param seed_num Random 'seed' - used only for unit test.
    # @param verbose True if this function is to be verbose.
    # @note Points in each result object are selected randomly.
    def split(self, factor=0.5, seed_num=None, verbose=False):
        """Returns two geospatial_data objects; the first is 'factor'
        times the size of the original and the second is the
        remainder. The two new objects are disjoint sets of each
        other.

        Points of the two new objects are selected RANDOMLY.

        This method creates two lists of indices which are passed into
        get_sample. The lists are created using random numbers, and
        they are unique sets eg. total_list(1,2,3,4,5,6,7,8,9)
        random_list(1,3,6,7,9) and remainder_list(0,2,4,5,8)

        Input - the factor which to split the object, if 0.1 then 10% of the
                original object will be returned

        Output - two geospatial_data objects that are disjoint sets of the
                 original
        """

        i = 0
        self_size = len(self)
        random_list = []
        remainder_list = []
        new_size = round(factor * self_size)

        # Find unique random numbers
        if verbose: print "make unique random number list and get indices"

        total = num.array(range(self_size), num.Int)    #array default#
        total_list = total.tolist()

        if verbose: print "total list len", len(total_list)

        # There will be repeated random numbers, however this will not be a
        # problem as they are being 'pop'ed out of the array, so if there
        # are two numbers the same they will pop different indices,
        # still basically random
        ## create list of non-unique random numbers
        if verbose: print "create random numbers list %s long" % new_size

        # Set seed if provided, mainly important for unit test!
        # plus recalculate seed when no seed provided.
        if seed_num is not None:
            seed(seed_num, seed_num)
        else:
            seed()

        if verbose: print "seed:", get_seed()

        random_num = randint(0, self_size-1, (int(new_size),))
        random_num = random_num.tolist()

        # need to sort and reverse so the pop() works correctly
        random_num.sort()
        random_num.reverse()

        if verbose: print "make random number list and get indices"

        j = 0
        k = 1
        remainder_list = total_list[:]

        # pops array index (random_num) from remainder_list
        # (which starts as the total_list) and appends to random_list
        random_num_len = len(random_num)
        for i in random_num:
            random_list.append(remainder_list.pop(i))
            j += 1
            # prints progress
            if verbose and round(random_num_len/10*k) == j:
                print '(%s/%s)' % (j, random_num_len)
                k += 1

        # FIXME: move to tests, it might take a long time
        # then create an array of random length between 500 and 1000,
        # and use a random factor between 0 and 1
        # setup for assertion
        test_total = random_list[:]
        test_total.extend(remainder_list)
        test_total.sort()
        msg = 'The two random lists made from the original list when added ' \
              'together DO NOT equal the original list'
        assert total_list == test_total, msg

        # Get new samples
        if verbose: print "get values of indices for random list"
        G1 = self.get_sample(random_list)
        if verbose: print "get values of indices for opposite of random list"
        G2 = self.get_sample(remainder_list)

        return G1, G2

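    # A minimal 80/20 split sketch; seed_num is fixed here only to make the
    # random selection reproducible:
    #
    #     G_most, G_rest = G.split(factor=0.8, seed_num=100)
    #     assert len(G_most) + len(G_rest) == len(G)
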
    ##
    # @brief Allow iteration over this object.
    def __iter__(self):
        """Read in the header, number_of_points and save the
        file pointer position
        """

        from Scientific.IO.NetCDF import NetCDFFile
        # FIXME - what to do if the file isn't there

        # FIXME (Ole): Shouldn't this go into the constructor?
        # This method acts like the constructor when blocking.
        # ... and shouldn't it be called block_size?
        #
        if self.max_read_lines is None:
            self.max_read_lines = MAX_READ_LINES

        if self.file_name[-4:] == ".pts":
            # See if the file is there. Throw a QUIET IO error if it isn't
            fd = open(self.file_name, 'r')
            fd.close()

            # Throws prints to screen if file not present
            self.fid = NetCDFFile(self.file_name, netcdf_mode_r)

            (self.blocking_georef,
             self.blocking_keys,
             self.number_of_points) = _read_pts_file_header(self.fid,
                                                            self.verbose)
            self.start_row = 0
            self.last_row = self.number_of_points
            self.show_verbose = 0
            self.verbose_block_size = (self.last_row + 10)/10
            self.block_number = 0
            self.number_of_blocks = self.number_of_points/self.max_read_lines
            # This computes the number of full blocks. The last block may be
            # smaller and won't be included in this estimate.

            if self.verbose is True:
                print 'Reading %d points (in ~%d blocks) from file %s. ' \
                      % (self.number_of_points,
                         self.number_of_blocks,
                         self.file_name),
                print 'Each block consists of %d data points' \
                      % self.max_read_lines
        else:
            # Assume the file is a csv file
            file_pointer = open(self.file_name)
            self.header, self.file_pointer = _read_csv_file_header(file_pointer)
            self.blocking_georef = None # Used for reconciling zones

        return self

    ##
    # @brief Read another block into the instance.
    def next(self):
        """Read a block, instantiate a new geospatial object and return it"""

        if self.file_name[-4:] == ".pts":
            if self.start_row == self.last_row:
                # Reached the end of the file in the last iteration
                # Remove blocking attributes
                self.fid.close()
                del self.max_read_lines
                del self.blocking_georef
                del self.last_row
                del self.start_row
                del self.blocking_keys
                del self.fid
                raise StopIteration
            fin_row = self.start_row + self.max_read_lines
            if fin_row > self.last_row:
                fin_row = self.last_row

            if self.verbose is True:
                if self.show_verbose >= self.start_row \
                   and self.show_verbose < fin_row:
                    print 'Reading block %d (points %d to %d) out of %d' \
                          % (self.block_number,
                             self.start_row,
                             fin_row,
                             self.number_of_blocks)

                    self.show_verbose += max(self.max_read_lines,
                                             self.verbose_block_size)

            # Read next block
            pointlist, att_dict = _read_pts_file_blocking(self.fid,
                                                          self.start_row,
                                                          fin_row,
                                                          self.blocking_keys)

            geo = Geospatial_data(pointlist, att_dict, self.blocking_georef)
            self.start_row = fin_row

            self.block_number += 1
        else:
            # Assume the file is a csv file
            try:
                (pointlist,
                 att_dict,
                 geo_ref,
                 self.file_pointer) = \
                    _read_csv_file_blocking(self.file_pointer,
                                            self.header[:],
                                            max_read_lines=self.max_read_lines,
                                            verbose=self.verbose)

                # Check that the zones haven't changed.
                if geo_ref is not None:
                    geo_ref.reconcile_zones(self.blocking_georef)
                    self.blocking_georef = geo_ref
                elif self.blocking_georef is not None:
                    msg = 'Geo reference given, then not given.'
                    msg += ' This should not happen.'
                    raise ValueError, msg
                geo = Geospatial_data(pointlist, att_dict, geo_ref)
            except StopIteration:
                self.file_pointer.close()
                del self.header
                del self.file_pointer
                raise StopIteration
            except ANUGAError:
                self.file_pointer.close()
                del self.header
                del self.file_pointer
                raise
            except SyntaxError:
                self.file_pointer.close()
                del self.header
                del self.file_pointer
                # This should only be if there is a format error
                msg = 'Could not open file %s. \n' % self.file_name
                msg += Error_message['IOError']
                raise SyntaxError, msg
        return geo


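# A minimal sketch of blockwise reading via the iterator protocol above;
# 'big_data.pts' is an illustrative file name, and each iteration yields a
# Geospatial_data object of up to max_read_lines points:
#
#     G = Geospatial_data(file_name='big_data.pts', load_file_now=False)
#     for block in G:
#         points = block.get_data_points()
#         # ... process this block of points ...
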
##################### Error messages ###########
Error_message = {}
Em = Error_message
Em['IOError'] = "NOTE: The format for a comma separated .txt/.csv file is:\n"
Em['IOError'] += "  1st line:     [column names]\n"
Em['IOError'] += "  other lines:  [x value], [y value], [attributes]\n"
Em['IOError'] += "\n"
Em['IOError'] += "    for example:\n"
Em['IOError'] += "    x, y, elevation, friction\n"
Em['IOError'] += "    0.6, 0.7, 4.9, 0.3\n"
Em['IOError'] += "    1.9, 2.8, 5, 0.3\n"
Em['IOError'] += "    2.7, 2.4, 5.2, 0.3\n"
Em['IOError'] += "\n"
Em['IOError'] += "The first two columns are assumed to be x, y coordinates.\n"
Em['IOError'] += "The attribute values must be numeric.\n"

##
# @brief Convert lat/long data to UTM points and a geo_reference.
# @param latitudes Vector of latitudes.
# @param longitudes Vector of longitudes.
# @param geo_reference Supplied georeference (must be None, see below).
# @param data_points Points data, possibly (lat, long) pairs.
# @param points_are_lats_longs True if data_points are (lat, long) pairs.
def _set_using_lat_long(latitudes,
                        longitudes,
                        geo_reference,
                        data_points,
                        points_are_lats_longs):
    """
    If the points have lat/long info, assume it is in (lat, long) order.
    """

    if geo_reference is not None:
        msg = "A georeference is specified yet latitude and longitude " \
              "are also specified!"
        raise ValueError, msg

    if data_points is not None and not points_are_lats_longs:
        msg = "Data points are specified yet latitude and longitude are " \
              "also specified."
        raise ValueError, msg

    if points_are_lats_longs:
        if data_points is None:
            msg = "Data points are not specified."
            raise ValueError, msg
        lats_longs = ensure_numeric(data_points)
        latitudes = num.ravel(lats_longs[:,0:1])
        longitudes = num.ravel(lats_longs[:,1:])

    if latitudes is None and longitudes is None:
        msg = "Latitudes and Longitudes are not specified."
        raise ValueError, msg

    if latitudes is None:
        msg = "Longitudes are specified yet latitudes aren't."
        raise ValueError, msg

    if longitudes is None:
        msg = "Latitudes are specified yet longitudes aren't."
        raise ValueError, msg

    data_points, zone = convert_from_latlon_to_utm(latitudes=latitudes,
                                                   longitudes=longitudes)
    return data_points, Geo_reference(zone=zone)


##
# @brief Read a .pts data file.
# @param file_name Path to file to read.
# @param verbose True if this function is to be verbose.
# @return (pointlist, attributes, geo_reference)
def _read_pts_file(file_name, verbose=False):
    """Read .pts NetCDF file

    Return an array of points, a dictionary of attribute arrays and a
    geo_reference, where the data looks like
        dic['points'] = [[1.0,2.0],[3.0,5.0]]
        dic['attributelist']['elevation'] = [[7.0,5.0]]
    """

    from Scientific.IO.NetCDF import NetCDFFile

    if verbose: print 'Reading ', file_name

    # See if the file is there. Throw a QUIET IO error if it isn't
    fd = open(file_name, 'r')
    fd.close()

    # Throws prints to screen if file not present
    fid = NetCDFFile(file_name, netcdf_mode_r)

    pointlist = num.array(fid.variables['points'])
    keys = fid.variables.keys()

    if verbose: print 'Got %d variables: %s' % (len(keys), keys)

    try:
        keys.remove('points')
    except ValueError, e:
        # list.remove raises ValueError if 'points' is missing
        fid.close()
        msg = "Expected keyword 'points' but could not find it"
        raise IOError, msg

    attributes = {}
    for key in keys:
        if verbose: print "reading attribute '%s'" % key

        attributes[key] = num.array(fid.variables[key])

    try:
        geo_reference = Geo_reference(NetCDFObject=fid)
    except AttributeError, e:
        geo_reference = None

    fid.close()

    return pointlist, attributes, geo_reference


##
# @brief Read a .csv data file.
# @param file_name Path to the .csv file to read.
# @param verbose True if this function is to be verbose.
def _read_csv_file(file_name, verbose=False):
    """Read .csv file

    Return an array of points, a dictionary of attribute arrays and a
    geo_reference, where the data looks like
        dic['points'] = [[1.0,2.0],[3.0,5.0]]
        dic['attributelist']['elevation'] = [[7.0,5.0]]
    """

    file_pointer = open(file_name)
    header, file_pointer = _read_csv_file_header(file_pointer)
    try:
        (pointlist,
         att_dict,
         geo_ref,
         file_pointer) = _read_csv_file_blocking(file_pointer,
                                                 header,
                                                 max_read_lines=1e30)
        # If the file is bigger than this, block...
        # FIXME (Ole) What's up here?
    except ANUGAError:
        file_pointer.close()
        raise

    file_pointer.close()

    return pointlist, att_dict, geo_ref


##
# @brief Read a .csv file header.
# @param file_pointer Open descriptor of the file to read.
# @param delimiter Header line delimiter string, split on this string.
# @param verbose True if this function is to be verbose.
# @return A tuple of (<list of cleaned header fields>, <input file_pointer>)

CSV_DELIMITER = ','

def _read_csv_file_header(file_pointer,
                          delimiter=CSV_DELIMITER,
                          verbose=False):
    """Read the header of a .csv file
    Return a list of the header names
    """

    line = file_pointer.readline()
    header = clean_line(line, delimiter)

    return header, file_pointer

##
# @brief Read a .csv file, with blocking.
# @param file_pointer Open descriptor of the file to read.
# @param header List of already read .csv header fields.
# @param delimiter Delimiter string header was split on.
# @param max_read_lines The max number of lines to read before blocking.
# @param verbose True if this function is to be verbose.
# @note Will throw IndexError, SyntaxError exceptions.
def _read_csv_file_blocking(file_pointer, header,
                            delimiter=CSV_DELIMITER,
                            max_read_lines=MAX_READ_LINES,
                            verbose=False):
    """Read the body of a .csv file.
    header: The list header of the csv file, with the x and y labels.
    """

    points = []
    pointattributes = []
    att_dict = {}

    # This is to remove the x and y headers.
    header = header[:]
    try:
        x_header = header.pop(0)
        y_header = header.pop(0)
    except IndexError:
        # if there are not two columns this will occur.
        # eg if it is a space separated file
        raise SyntaxError

    read_lines = 0
    while read_lines < max_read_lines:
        line = file_pointer.readline()
        numbers = clean_line(line, delimiter)
        if len(numbers) <= 1:
            break
        if line[0] == '#':
            continue

        read_lines += 1

        try:
            x = float(numbers[0])
            y = float(numbers[1])
            points.append([x, y])
            numbers.pop(0)
            numbers.pop(0)
            if len(header) != len(numbers):
                file_pointer.close()
                msg = "File load error. " \
                      "There might be a problem with the file header."
                raise SyntaxError, msg
            for i, n in enumerate(numbers):
                n = n.strip()    # str.strip() returns a new string
                if n != '\n' and n != '':
                    #attributes.append(float(n))
                    att_dict.setdefault(header[i], []).append(float(n))
        except ValueError:
            raise SyntaxError

    if points == []:
        raise StopIteration

    pointlist = num.array(points, num.Float)
    for key in att_dict.keys():
        att_dict[key] = num.array(att_dict[key], num.Float)

    # Do stuff here so the info is in lat's and longs
    geo_ref = None
    x_header = lower(x_header[:3])
    y_header = lower(y_header[:3])
    if (x_header == 'lon' or x_header == 'lat') \
       and (y_header == 'lon' or y_header == 'lat'):
        if x_header == 'lon':
            longitudes = num.ravel(pointlist[:,0:1])
            latitudes = num.ravel(pointlist[:,1:])
        else:
            latitudes = num.ravel(pointlist[:,0:1])
            longitudes = num.ravel(pointlist[:,1:])

        pointlist, geo_ref = _set_using_lat_long(latitudes,
                                                 longitudes,
                                                 geo_reference=None,
                                                 data_points=None,
                                                 points_are_lats_longs=False)

    return pointlist, att_dict, geo_ref, file_pointer


##
# @brief Read a .pts file header.
# @param fid Handle to the open .pts file.
# @param verbose True if the function is to be verbose.
# @return (geo_reference, keys, fid.dimensions['number_of_points'])
# @note Will throw IOError and AttributeError exceptions.
def _read_pts_file_header(fid, verbose=False):
    """Read the geo_reference and number_of_points from a .pts file
    """

    keys = fid.variables.keys()
    try:
        keys.remove('points')
    except ValueError, e:
        # list.remove raises ValueError if 'points' is missing
        fid.close()
        msg = "Expected keyword 'points' but could not find it."
        raise IOError, msg

    if verbose: print 'Got %d variables: %s' % (len(keys), keys)

    try:
        geo_reference = Geo_reference(NetCDFObject=fid)
    except AttributeError, e:
        geo_reference = None

    return geo_reference, keys, fid.dimensions['number_of_points']


##
# @brief Read the body of a .pts file, with blocking.
# @param fid Handle to already open file.
# @param start_row Start row index of points to return.
# @param fin_row End row index of points to return.
# @param keys Iterable of keys to return.
# @return Tuple of (pointlist, attributes).
def _read_pts_file_blocking(fid, start_row, fin_row, keys):
    """Read the body of a .pts file.
    """

    pointlist = num.array(fid.variables['points'][start_row:fin_row])

    attributes = {}
    for key in keys:
        attributes[key] = num.array(fid.variables[key][start_row:fin_row])

    return pointlist, attributes


##
# @brief Write a .pts data file.
# @param file_name Path to the file to write.
# @param write_data_points Data points to write.
# @param write_attributes Attributes to write.
# @param write_geo_reference Georef to write.
def _write_pts_file(file_name,
                    write_data_points,
                    write_attributes=None,
                    write_geo_reference=None):
    """Write .pts NetCDF file

    NOTE: Below might not be valid, ask Duncan : NB 5/2006

    WARNING: This function mangles the point_atts data structure
    # FIXME: (DSG) This format has issues.
    # There can't be an attribute called points
    # consider format change
    # method changed by NB not sure if above statement is correct

    should create new test for this
    legal_keys = ['pointlist', 'attributelist', 'geo_reference']
    for key in point_atts.keys():
        msg = 'Key %s is illegal. Valid keys are %s' % (key, legal_keys)
        assert key in legal_keys, msg
    """

    from Scientific.IO.NetCDF import NetCDFFile

    # NetCDF file definition
    outfile = NetCDFFile(file_name, netcdf_mode_w)

    # Create new file
    outfile.institution = 'Geoscience Australia'
    outfile.description = 'NetCDF format for compact and portable storage ' \
                          'of spatial point data'

    # Dimension definitions
    shape = write_data_points.shape[0]
    outfile.createDimension('number_of_points', shape)
    outfile.createDimension('number_of_dimensions', 2) # This is 2d data

    # Variable definition
    outfile.createVariable('points', num.Float, ('number_of_points',
                                                 'number_of_dimensions'))

    # create variables
    outfile.variables['points'][:] = write_data_points #.astype(Float32)

    if write_attributes is not None:
        for key in write_attributes.keys():
            outfile.createVariable(key, num.Float, ('number_of_points',))
            outfile.variables[key][:] = write_attributes[key] #.astype(Float32)

    if write_geo_reference is not None:
        write_NetCDF_georeference(write_geo_reference, outfile)

    outfile.close()


##
# @brief Write a .csv data file.
# @param file_name Path to the file to write.
# @param write_data_points Data points to write.
# @param write_attributes Attributes to write.
# @param as_lat_long True if points are lat/lon, else x/y.
# @param delimiter The CSV delimiter to use.
def _write_csv_file(file_name,
                    write_data_points,
                    write_attributes=None,
                    as_lat_long=False,
                    delimiter=','):
    """Write a .csv file.
    """

    points = write_data_points
    pointattributes = write_attributes

    fd = open(file_name, 'w')

    if as_lat_long:
        titlelist = "latitude" + delimiter + "longitude" + delimiter
    else:
        titlelist = "x" + delimiter + "y" + delimiter

    if pointattributes is not None:
        for title in pointattributes.keys():
            titlelist = titlelist + title + delimiter
    titlelist = titlelist[0:-len(delimiter)] # remove the last delimiter

    fd.write(titlelist + "\n")

    # <x/lat> <y/long> [attributes]
    for i, vert in enumerate(points):
        if pointattributes is not None:
            attlist = delimiter # separates the y value from first attribute
            for att in pointattributes.keys():
                attlist = attlist + str(pointattributes[att][i]) + delimiter
            attlist = attlist[0:-len(delimiter)] # remove the last delimiter
            attlist = attlist.strip()
        else:
            attlist = ''

        fd.write(str(vert[0]) + delimiter + str(vert[1]) + attlist + "\n")

    fd.close()

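# For two points with an 'elevation' attribute, _write_csv_file produces a
# file like this (illustrative output):
#
#     x,y,elevation
#     1.0,2.0,7.0
#     3.0,5.0,5.0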

##
# @brief Write a URS file.
# @param file_name The path of the file to write.
# @param points List of (lat, long) points to write.
# @param delimiter Field separator to use.
def _write_urs_file(file_name, points, delimiter=' '):
    """Write a URS format file.
    Export a file, file_name, in the URS format.
    The data points are in lats and longs.
    """

    fd = open(file_name, 'w')

    # first line is # points
    fd.write(str(len(points)) + "\n")

    # <lat> <long> <id#>
    for i, vert in enumerate(points):
        fd.write(str(round(vert[0], 7)) + delimiter +
                 str(round(vert[1], 7)) + delimiter + str(i) + "\n")

    fd.close()


##
# @brief Convert the lists in a points dictionary to numeric arrays.
# @param point_atts A points dictionary with 'pointlist' and 'attributelist'.
# @return The same dictionary with values converted to numeric arrays.
def _point_atts2array(point_atts):
    point_atts['pointlist'] = num.array(point_atts['pointlist'], num.Float)

    for key in point_atts['attributelist'].keys():
        point_atts['attributelist'][key] = \
            num.array(point_atts['attributelist'][key], num.Float)

    return point_atts

1371 | |
---|
##
# @brief Convert geospatial object to a points dictionary.
# @param geospatial_data The geospatial object to convert.
# @return A points dictionary.
def geospatial_data2points_dictionary(geospatial_data):
    """Convert geospatial data to points_dictionary
    """

    points_dictionary = {}
    points_dictionary['pointlist'] = geospatial_data.data_points

    points_dictionary['attributelist'] = {}

    for attribute_name in geospatial_data.attributes.keys():
        val = geospatial_data.attributes[attribute_name]
        points_dictionary['attributelist'][attribute_name] = val

    points_dictionary['geo_reference'] = geospatial_data.geo_reference

    return points_dictionary

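# Illustrative sketch of the points_dictionary structure produced above
# (placeholders, not literal values):
#
#     {'pointlist': <Mx2 array of x,y coordinates>,
#      'attributelist': {'elevation': <length-M array>, ...},
#      'geo_reference': <Geo_reference object>}
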
##
# @brief Convert a points dictionary to a geospatial object.
# @param points_dictionary A points dictionary to convert.
# @return A Geospatial_data object.
def points_dictionary2geospatial_data(points_dictionary):
    """Convert points_dictionary to geospatial data object
    """

    msg = 'Points dictionary must have key pointlist'
    assert points_dictionary.has_key('pointlist'), msg

    msg = 'Points dictionary must have key attributelist'
    assert points_dictionary.has_key('attributelist'), msg

    if points_dictionary.has_key('geo_reference'):
        geo = points_dictionary['geo_reference']
    else:
        geo = None

    return Geospatial_data(points_dictionary['pointlist'],
                           points_dictionary['attributelist'],
                           geo_reference=geo)

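# Illustrative sketch of a round trip through the two converters above
# (assumes 'G' is an existing Geospatial_data instance):
#
#     d = geospatial_data2points_dictionary(G)
#     G2 = points_dictionary2geospatial_data(d)
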
##
# @brief Split a string into 'clean' fields.
# @param line The string to process.
# @param delimiter The delimiter string to split 'line' with.
# @return A list of 'cleaned' field strings.
# @note Any fields that were initially zero length will be removed.
def clean_line(line, delimiter):
    """Split a line into whitespace-stripped fields, dropping empty ones.
    """

    line = line.strip()                 # probably unnecessary RW
    numbers = line.split(delimiter)

    # Walk backwards so popping an element doesn't disturb the
    # indices still to be visited
    i = len(numbers) - 1
    while i >= 0:
        if numbers[i] == '':
            numbers.pop(i)
        else:
            numbers[i] = numbers[i].strip()
        i -= 1

    return numbers

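# Illustrative example (made-up input):
#
#     >>> clean_line(' 1.0,, 2.0 ,3.0 ', ',')
#     ['1.0', '2.0', '3.0']
#
# Note that only fields which are empty *before* stripping are dropped,
# as the @note above says.
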
##
# @brief Ensure that points are in absolute coordinates.
# @param points A list or array of points to check, or geospatial object.
# @param geo_reference If supplied, points are assumed relative to this origin.
# @return A Numeric array of absolute points.
def ensure_absolute(points, geo_reference=None):
    """Ensure that points are in absolute coordinates.

    This function accepts several input formats and always outputs one
    format - a Numeric array of absolute points.

    Input formats are:
    points: List or Numeric array of coordinate pairs [xi, eta] of
            points, or a geospatial object, or a points file name.

    geo_reference: A geo_reference object or a 3-tuple consisting of
                   UTM zone, easting and northing.
                   If specified, point coordinates are assumed to be
                   relative to this origin.
    """

    # Input check
    if isinstance(points, basestring):
        # It's a string - assume it is a points file name
        points = Geospatial_data(file_name=points)

    if isinstance(points, Geospatial_data):
        points = points.get_data_points(absolute=True)
        msg = 'Use a Geospatial_data object or a mesh origin, not both.'
        assert geo_reference is None, msg
    else:
        points = ensure_numeric(points, num.Float)

    # Sort out geo_reference and convert points
    if geo_reference is None:
        geo = None    # Geo_reference()
    else:
        if isinstance(geo_reference, Geo_reference):
            geo = geo_reference
        else:
            geo = Geo_reference(geo_reference[0],
                                geo_reference[1],
                                geo_reference[2])
        points = geo.get_absolute(points)

    return points

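# Illustrative sketch (the coordinates and UTM zone are made up):
#
#     relative_points = [[10.0, 20.0], [30.0, 40.0]]
#     absolute_points = ensure_absolute(relative_points,
#                                       geo_reference=(56, 100000.0, 200000.0))
#
# Each point is shifted by the (easting, northing) of the supplied origin,
# e.g. the first point becomes [100010.0, 200020.0].
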
##
# @brief Ensure that points are a Geospatial_data instance.
# @param points A list or array of points, or a geospatial object.
# @param geo_reference If supplied, points are assumed relative to this origin.
# @return A geospatial object.
def ensure_geospatial(points, geo_reference=None):
    """Ensure that points are a Geospatial_data instance.

    This function accepts several input formats and always outputs one
    format - a Geospatial_data instance.

    Input formats are:
    points: List or Numeric array of coordinate pairs [xi, eta] of
            points, or a geospatial object.

    geo_reference: A geo_reference object or a 3-tuple consisting of
                   UTM zone, easting and northing.
                   If specified, point coordinates are assumed to be
                   relative to this origin.
    """

    # Input check
    if isinstance(points, Geospatial_data):
        msg = 'Use a Geospatial_data object or a mesh origin, not both.'
        assert geo_reference is None, msg
        return points
    else:
        # List or numeric array of absolute points
        points = ensure_numeric(points, num.Float)

    # Sort out geo reference
    if geo_reference is None:
        geo = None
    else:
        if isinstance(geo_reference, Geo_reference):
            geo = geo_reference
        else:
            geo = Geo_reference(geo_reference[0],
                                geo_reference[1],
                                geo_reference[2])

    # Create Geospatial_data object with appropriate geo reference and return
    points = Geospatial_data(data_points=points, geo_reference=geo)

    return points

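# Illustrative sketch (made-up coordinates and UTM zone):
#
#     G = ensure_geospatial([[10.0, 20.0], [30.0, 40.0]],
#                           geo_reference=(56, 100000.0, 200000.0))
#
# G is now a Geospatial_data instance whose points are interpreted as
# relative to the supplied origin.
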
##
# @brief Find the alpha value that gives the smallest normalised covariance.
# @param data_file Name of the pts, txt or csv file of points to fit.
# @param alpha_list List of alpha values to test.
# @param mesh_file Name of the mesh file to create, or mesh file to read.
# @param boundary_poly Bounding polygon for the data (not used directly here).
# @param mesh_resolution The maximum area of a triangle in the created mesh.
# @param north_boundary Northern boundary value of the created mesh.
# @param south_boundary Southern boundary value of the created mesh.
# @param east_boundary Eastern boundary value of the created mesh.
# @param west_boundary Western boundary value of the created mesh.
# @param split_factor Fraction of points to remove for cross-validation.
# @param plot_name Name of the plot file of the results.
# @param seed_num Seed for the random number generator.
# @param cache True if intermediate results are to be cached.
# @param verbose True if this function is to be verbose.
def find_optimal_smoothing_parameter(data_file,
                                     alpha_list=None,
                                     mesh_file=None,
                                     boundary_poly=None,
                                     mesh_resolution=100000,
                                     north_boundary=None,
                                     south_boundary=None,
                                     east_boundary=None,
                                     west_boundary=None,
                                     plot_name='all_alphas',
                                     split_factor=0.1,
                                     seed_num=None,
                                     cache=False,
                                     verbose=False):
    """
    Removes a small random sample of points from 'data_file'. Then creates
    models with different alpha values from 'alpha_list' and cross validates
    the predicted values against the previously removed point data. Returns
    the alpha value which has the smallest covariance.

    data_file: must not contain points outside the boundaries defined
               and is either a pts, txt or csv file.

    alpha_list: the alpha values to test in a single list

    mesh_file: name of the mesh file to create, or if passed in, the
               mesh file to read. NOTE, if a mesh file is passed in then
               mesh_resolution, north_boundary, south_boundary etc are
               ignored.

    mesh_resolution: the maximum area of a mesh triangle

    north_boundary... west_boundary: the extents of the rectangular
                                     area to mesh

    plot_name: the name for the plot containing the results

    seed_num: the seed for the random number generator

    USAGE:
        value, alpha = find_optimal_smoothing_parameter(data_file=fileName,
                                                 alpha_list=[0.0001, 0.01, 1],
                                                 mesh_file=None,
                                                 mesh_resolution=3,
                                                 north_boundary=5,
                                                 south_boundary=-5,
                                                 east_boundary=5,
                                                 west_boundary=-5,
                                                 plot_name='all_alphas',
                                                 seed_num=100000,
                                                 verbose=False)

    OUTPUT: returns the minimum normalised covariance calculated AND the
            alpha that created it. PLUS writes a plot of the results.

    NOTE: the code will not work if the data_file extent is greater than the
          boundary_polygon or any of the boundaries, eg north_boundary...
          west_boundary
    """

    from anuga.shallow_water import Domain
    from anuga.geospatial_data.geospatial_data import Geospatial_data
    from anuga.pmesh.mesh_interface import create_mesh_from_regions
    from anuga.utilities.numerical_tools import cov
    from anuga.utilities.polygon import is_inside_polygon
    from anuga.fit_interpolate.benchmark_least_squares import mem_usage

    attribute_smoothed = 'elevation'

    if mesh_file is None:
        if verbose: print "building mesh"
        mesh_file = 'temp.msh'

        if north_boundary is None or south_boundary is None \
           or east_boundary is None or west_boundary is None:
            msg = 'All boundaries must be defined'
            raise ANUGAError(msg)

        poly_topo = [[east_boundary, south_boundary],
                     [east_boundary, north_boundary],
                     [west_boundary, north_boundary],
                     [west_boundary, south_boundary]]

        create_mesh_from_regions(poly_topo,
                                 boundary_tags={'back': [2],
                                                'side': [1, 3],
                                                'ocean': [0]},
                                 maximum_triangle_area=mesh_resolution,
                                 filename=mesh_file,
                                 use_cache=cache,
                                 verbose=verbose)
    else:    # mesh file provided
        # test that the mesh file exists
        if verbose: print 'Reading from file: %s' % mesh_file
        if access(mesh_file, F_OK) == 0:
            msg = "file %s doesn't exist!" % mesh_file
            raise IOError, msg

    # split topo data
    if verbose: print 'Reading elevation file: %s' % data_file
    G = Geospatial_data(file_name=data_file)
    if verbose: print 'Start split'
    G_small, G_other = G.split(split_factor, seed_num, verbose=verbose)
    if verbose: print 'Finish split'
    points = G_small.get_data_points()

    if verbose: print "Number of points in sample to compare: ", len(points)

    if alpha_list is None:
        alphas = [0.001, 0.01, 100]
        #alphas = [0.000001, 0.00001, 0.0001, 0.001, 0.01,
        #          0.1, 1.0, 10.0, 100.0, 1000.0, 10000.0]
    else:
        alphas = alpha_list

    # creates array where columns 1 and 2 are x, y and column 3 is elevation;
    # column 4 onwards is the elevation_predicted using the alpha, which will
    # be compared later against the real removed data
    data = num.array([], typecode=num.Float)

    data = num.resize(data, (len(points), 3+len(alphas)))

    # gets relative points from sample
    data[:,0] = points[:,0]
    data[:,1] = points[:,1]
    elevation_sample = G_small.get_attributes(attribute_name=attribute_smoothed)
    data[:,2] = elevation_sample

    normal_cov = num.array(num.zeros([len(alphas), 2]), typecode=num.Float)

    if verbose: print 'Setup computational domains with different alphas'

    for i, alpha in enumerate(alphas):
        # add G_other data to domains with different alphas
        if verbose:
            print '\nCalculating domain and mesh for Alpha =', alpha, '\n'
        domain = Domain(mesh_file, use_cache=cache, verbose=verbose)
        if verbose: print domain.statistics()
        domain.set_quantity(attribute_smoothed,
                            geospatial_data=G_other,
                            use_cache=cache,
                            verbose=verbose,
                            alpha=alpha)

        # convert points to geospatial data for use with get_values below
        points_geo = Geospatial_data(points, domain.geo_reference)

        # returns the predicted elevation of the points that were 'split' out
        # of the original data set for one particular alpha
        if verbose: print 'Get predicted elevation for locations to be compared'
        elevation_predicted = \
                domain.quantities[attribute_smoothed].\
                        get_values(interpolation_points=points_geo)

        # add predicted elevation to array that starts with x, y, z...
        data[:,i+3] = elevation_predicted

        sample_cov = cov(elevation_sample)
        ele_cov = cov(elevation_sample - elevation_predicted)
        normal_cov[i,:] = [alpha, ele_cov / sample_cov]

        if verbose:
            print 'Covariance for alpha ', normal_cov[i][0], '= ', \
                  normal_cov[i][1]
            print '-------------------------------------------- \n'

    normal_cov0 = normal_cov[:,0]
    normal_cov_new = num.take(normal_cov, num.argsort(normal_cov0))

    if plot_name is not None:
        from pylab import savefig, semilogx, loglog

        semilogx(normal_cov_new[:,0], normal_cov_new[:,1])
        loglog(normal_cov_new[:,0], normal_cov_new[:,1])
        savefig(plot_name, dpi=300)

    if mesh_file == 'temp.msh':
        remove(mesh_file)

    if verbose:
        print 'Final results:'
        for i, alpha in enumerate(alphas):
            print 'covariance for alpha %s = %s ' \
                  % (normal_cov[i][0], normal_cov[i][1])
        print '\nOptimal alpha is: %s ' \
              % normal_cov_new[(num.argmin(normal_cov_new, axis=0))[1], 0]

    # covariance and optimal alpha
    return (min(normal_cov_new[:,1]),
            normal_cov_new[(num.argmin(normal_cov_new, axis=0))[1], 0])

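# The quality measure computed above is the normalised covariance
#
#     normalised_cov = cov(elevation_sample - elevation_predicted)
#                      / cov(elevation_sample)
#
# i.e. the variance of the prediction error relative to the variance of the
# held-out sample. A value close to zero means the fit with that alpha
# reproduces the removed points almost exactly; the optimal alpha is the
# one with the smallest such ratio.
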
##
# @brief Find the optimal smoothing parameter (older, superseded version).
# @param data_file Name of the pts, txt or csv file of points to fit.
# @param alpha_list List of alpha values to test.
# @param mesh_file Name of the mesh file to create, or mesh file to read.
# @param boundary_poly Bounding polygon for the data (not used directly here).
# @param mesh_resolution The maximum area of a triangle in the created mesh.
# @param north_boundary Northern boundary value of the created mesh.
# @param south_boundary Southern boundary value of the created mesh.
# @param east_boundary Eastern boundary value of the created mesh.
# @param west_boundary Western boundary value of the created mesh.
# @param plot_name Name of the plot file of the results.
# @param split_factor Fraction of points to remove for cross-validation.
# @param seed_num Seed for the random number generator.
# @param cache True if intermediate results are to be cached.
# @param verbose True if this function is to be verbose.
def old_find_optimal_smoothing_parameter(data_file,
                                         alpha_list=None,
                                         mesh_file=None,
                                         boundary_poly=None,
                                         mesh_resolution=100000,
                                         north_boundary=None,
                                         south_boundary=None,
                                         east_boundary=None,
                                         west_boundary=None,
                                         plot_name='all_alphas',
                                         split_factor=0.1,
                                         seed_num=None,
                                         cache=False,
                                         verbose=False):
    """
    data_file: must not contain points outside the boundaries defined
               and is either a pts, txt or csv file.

    alpha_list: the alpha values to test in a single list

    mesh_file: name of the mesh file to create, or if passed in, the
               mesh file to read. NOTE, if a mesh file is passed in then
               mesh_resolution, north_boundary, south_boundary etc are
               ignored.

    mesh_resolution: the maximum area of a mesh triangle

    north_boundary... west_boundary: the extents of the rectangular
                                     area to mesh

    plot_name: the name for the plot containing the results

    seed_num: the seed for the random number generator

    USAGE:
        value, alpha = find_optimal_smoothing_parameter(data_file=fileName,
                                                 alpha_list=[0.0001, 0.01, 1],
                                                 mesh_file=None,
                                                 mesh_resolution=3,
                                                 north_boundary=5,
                                                 south_boundary=-5,
                                                 east_boundary=5,
                                                 west_boundary=-5,
                                                 plot_name='all_alphas',
                                                 seed_num=100000,
                                                 verbose=False)

    OUTPUT: returns the minimum normalised covariance calculated AND the
            alpha that created it. PLUS writes a plot of the results.

    NOTE: the code will not work if the data_file extent is greater than the
          boundary_polygon or the north_boundary...west_boundary
    """

    from anuga.shallow_water import Domain
    from anuga.geospatial_data.geospatial_data import Geospatial_data
    from anuga.pmesh.mesh_interface import create_mesh_from_regions
    from anuga.utilities.numerical_tools import cov
    from anuga.utilities.polygon import is_inside_polygon
    from anuga.fit_interpolate.benchmark_least_squares import mem_usage

    attribute_smoothed = 'elevation'

    if mesh_file is None:
        mesh_file = 'temp.msh'

        if north_boundary is None or south_boundary is None \
           or east_boundary is None or west_boundary is None:
            msg = 'All boundaries must be defined'
            raise ANUGAError(msg)

        poly_topo = [[east_boundary, south_boundary],
                     [east_boundary, north_boundary],
                     [west_boundary, north_boundary],
                     [west_boundary, south_boundary]]

        create_mesh_from_regions(poly_topo,
                                 boundary_tags={'back': [2],
                                                'side': [1, 3],
                                                'ocean': [0]},
                                 maximum_triangle_area=mesh_resolution,
                                 filename=mesh_file,
                                 use_cache=cache,
                                 verbose=verbose)
    else:    # mesh file provided
        # test that the mesh file exists
        if access(mesh_file, F_OK) == 0:
            msg = "file %s doesn't exist!" % mesh_file
            raise IOError, msg

    # split topo data
    G = Geospatial_data(file_name=data_file)
    if verbose: print 'start split'
    G_small, G_other = G.split(split_factor, seed_num, verbose=verbose)
    if verbose: print 'finish split'
    points = G_small.get_data_points()

    #FIXME: Remove points outside boundary polygon
#    print 'new point', len(points)
#
#    new_points = []
#    new_points = array([], typecode=Float)
#    new_points = resize(new_points, (len(points), 2))
#    print 'BOUNDARY', boundary_poly
#    for i, point in enumerate(points):
#        if is_inside_polygon(point, boundary_poly, verbose=True):
#            new_points[i] = point
#            print 'WOW', i, new_points[i]
#    points = new_points

    if verbose: print "Number of points in sample to compare: ", len(points)

    if alpha_list is None:
        alphas = [0.001, 0.01, 100]
        #alphas = [0.000001, 0.00001, 0.0001, 0.001, 0.01,
        #          0.1, 1.0, 10.0, 100.0, 1000.0, 10000.0]
    else:
        alphas = alpha_list

    domains = {}

    if verbose: print 'Setup computational domains with different alphas'

    for alpha in alphas:
        # add G_other data to domains with different alphas
        if verbose:
            print '\nCalculating domain and mesh for Alpha =', alpha, '\n'
        domain = Domain(mesh_file, use_cache=cache, verbose=verbose)
        if verbose: print domain.statistics()
        domain.set_quantity(attribute_smoothed,
                            geospatial_data=G_other,
                            use_cache=cache,
                            verbose=verbose,
                            alpha=alpha)
        domains[alpha] = domain

    # creates array where columns 1 and 2 are x, y and column 3 is elevation;
    # column 4 onwards is the elevation_predicted using the alpha, which will
    # be compared later against the real removed data
    data = num.array([], typecode=num.Float)

    data = num.resize(data, (len(points), 3+len(alphas)))

    # gets relative points from sample
    data[:,0] = points[:,0]
    data[:,1] = points[:,1]
    elevation_sample = G_small.get_attributes(attribute_name=attribute_smoothed)
    data[:,2] = elevation_sample

    normal_cov = num.array(num.zeros([len(alphas), 2]), typecode=num.Float)

    if verbose:
        print 'Determine difference between predicted results and actual data'
    for i, alpha in enumerate(domains):
        if verbose: print 'Alpha =', alpha

        points_geo = domains[alpha].geo_reference.change_points_geo_ref(points)
        # returns the predicted elevation of the points that were 'split' out
        # of the original data set for one particular alpha
        elevation_predicted = \
                domains[alpha].quantities[attribute_smoothed].\
                        get_values(interpolation_points=points_geo)

        # add predicted elevation to array that starts with x, y, z...
        data[:,i+3] = elevation_predicted

        sample_cov = cov(elevation_sample)
        ele_cov = cov(elevation_sample - elevation_predicted)
        normal_cov[i,:] = [alpha, ele_cov / sample_cov]
        print 'memory usage during compare', mem_usage()
        if verbose: print 'cov', normal_cov[i][0], '= ', normal_cov[i][1]

    normal_cov0 = normal_cov[:,0]
    normal_cov_new = num.take(normal_cov, num.argsort(normal_cov0))

    if plot_name is not None:
        from pylab import savefig, semilogx, loglog

        semilogx(normal_cov_new[:,0], normal_cov_new[:,1])
        loglog(normal_cov_new[:,0], normal_cov_new[:,1])
        savefig(plot_name, dpi=300)
    if mesh_file == 'temp.msh':
        remove(mesh_file)

    return (min(normal_cov_new[:,1]),
            normal_cov_new[(num.argmin(normal_cov_new, axis=0))[1], 0])


if __name__ == "__main__":
    pass