Changeset 7776 for trunk/anuga_core/source/anuga/file/csv_file.py
- Timestamp:
- Jun 3, 2010, 6:03:07 PM (14 years ago)
- File:
-
- 1 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/anuga_core/source/anuga/file/csv_file.py
def csv2building_polygons(file_name,
                          floor_height=3,
                          clipping_polygons=None):
    """
    Convert CSV files of the form:

    easting,northing,id,floors
    422664.22,870785.46,2,0
    422672.48,870780.14,2,0
    422668.17,870772.62,2,0
    422660.35,870777.17,2,0
    422664.22,870785.46,2,0
    422661.30,871215.06,3,1
    422667.50,871215.70,3,1
    422668.30,871204.86,3,1
    422662.21,871204.33,3,1
    422661.30,871215.06,3,1

    to a dictionary of polygons with id as key.
    The associated number of floors are converted to m above MSL and
    returned as a separate dictionary also keyed by id.

    Optional parameter floor_height is the height of each building story.
    Optional parameter clipping_polygons is a list of polygons selecting
    buildings. Any building not in these polygons will be omitted.

    See csv2polygons for more details
    """

    # BUG FIX: this call previously hard-coded clipping_polygons=None,
    # silently ignoring any clipping polygons supplied by the caller.
    polygons, values = csv2polygons(file_name,
                                    value_name='floors',
                                    clipping_polygons=clipping_polygons)

    # Convert the 'floors' column (strings) to heights in metres above MSL.
    heights = {}
    for key in values.keys():
        heights[key] = float(values[key]) * floor_height

    return polygons, heights
##
# @brief Convert CSV file into a dictionary of polygons and associated values.
# @param file_name The path to the file to read.
# @param value_name Name of the 4th (value) column.
# @param clipping_polygons Optional list of polygons used to clip buildings.
def csv2polygons(file_name,
                 value_name='value',
                 clipping_polygons=None):
    """
    Convert CSV files of the form:

    easting,northing,id,value
    422664.22,870785.46,2,0
    422672.48,870780.14,2,0
    422668.17,870772.62,2,0
    422660.35,870777.17,2,0
    422664.22,870785.46,2,0
    422661.30,871215.06,3,1
    422667.50,871215.70,3,1
    422668.30,871204.86,3,1
    422662.21,871204.33,3,1
    422661.30,871215.06,3,1

    to a dictionary of polygons with id as key.
    The associated values are returned as a separate dictionary also keyed by id.


    easting: x coordinate relative to zone implied by the model
    northing: y coordinate relative to zone implied by the model
    id: tag for polygon comprising points with this tag
    value: numeral associated with each polygon. These must be the same for all points in each polygon.

    The last header, value, can take on other names such as roughness, floors, etc - or it can be omitted
    in which case the returned values will be None

    Eastings and Northings will be returned as floating point values while
    id and values will be returned as strings.

    Optional argument: clipping_polygons will select only those polygons that are
    fully within one or more of the clipping_polygons. In other words any polygon from
    the csv file which has at least one point not inside one of the clipping polygons
    will be excluded

    See underlying function load_csv_as_dict for more details.
    """

    X, _ = load_csv_as_dict(file_name)

    msg = 'Polygon csv file must have 3 or 4 columns'
    assert len(X.keys()) in [3, 4], msg

    msg = 'Did not find expected column header: easting'
    assert 'easting' in X.keys(), msg

    msg = 'Did not find expected column header: northing'
    # BUG FIX: assertion message previously referenced the undefined
    # name `northing`, raising NameError instead of the intended message.
    assert 'northing' in X.keys(), msg

    # BUG FIX: message previously said 'northing' for the 'id' check.
    msg = 'Did not find expected column header: id'
    assert 'id' in X.keys(), msg

    if value_name is not None:
        msg = 'Did not find expected column header: %s' % value_name
        assert value_name in X.keys(), msg

    polygons = {}
    if len(X.keys()) == 4:
        values = {}
    else:
        values = None

    # Loop through entries and compose polygons
    excluded_polygons = {}
    past_ids = {}
    last_id = None
    for i, id in enumerate(X['id']):

        # Check for duplicate polygons
        if id in past_ids:
            msg = 'Polygon %s was duplicated in line %d' % (id, i)
            # BUG FIX: was Python-2-only `raise Exception, msg` syntax.
            raise Exception(msg)

        if id not in polygons:
            # Start new polygon
            polygons[id] = []
            if values is not None:
                values[id] = X[value_name][i]

            # Keep track of previous polygon ids
            if last_id is not None:
                past_ids[last_id] = i

        # Append this point to current polygon
        point = [float(X['easting'][i]), float(X['northing'][i])]

        if clipping_polygons is not None:
            # Exclude the whole polygon if any of its points falls outside
            # every clipping polygon.
            exclude = True
            for clipping_polygon in clipping_polygons:
                if inside_polygon(point, clipping_polygon):
                    exclude = False
                    break

            if exclude is True:
                excluded_polygons[id] = True

        polygons[id].append(point)

        # Check that value is the same across each polygon.
        # BUG FIX: this check previously ran even when values is None
        # (3-column files), crashing on values[id]; also the two message
        # parts were concatenated without a separating space.
        if values is not None:
            msg = 'Values must be the same across each polygon. '
            msg += ('I got %s in line %d but it should have been %s'
                    % (X[value_name][i], i, values[id]))
            assert values[id] == X[value_name][i], msg

        last_id = id

    # Weed out polygons that were not wholly inside clipping polygons.
    # BUG FIX: excluded ids are now also removed from `values` so the two
    # returned dictionaries stay keyed consistently.
    for id in excluded_polygons:
        del polygons[id]
        if values is not None and id in values:
            del values[id]

    return polygons, values
Note: See TracChangeset
for help on using the changeset viewer.