Changeset 5014
- Timestamp: Feb 8, 2008, 5:29:54 PM
- Location: anuga_core/source/anuga/utilities
- Files: 1 added, 5 edited
anuga_core/source/anuga/utilities/data_audit.py
r4970 r5014 5 5 from os.path import join, splitext 6 6 7 from anuga.utilities.xml_tools import parse, pretty_print_tree, get_elements, get_text7 from anuga.utilities.xml_tools import xml2object, XML_element 8 8 from anuga.utilities.system_tools import compute_checksum 9 10 from data_audit_config import extensions_to_ignore, directories_to_ignore, files_to_ignore 11 12 9 13 10 14 # Audit exceptions … … 15 19 class WrongTags(Exception): pass 16 20 17 audit_exceptions = (NotPublishable, FilenameMismatch, CRCMismatch, Invalid, WrongTags) 21 audit_exceptions = (NotPublishable, 22 FilenameMismatch, 23 CRCMismatch, 24 Invalid, 25 WrongTags) 18 26 19 27 def IP_verified(directory, verbose=False): … … 37 45 """ 38 46 39 print '---------------------------------------------'40 print 'Files that need to be assessed for IP issues:'41 print '---------------------------------------------'42 43 47 # Print header 44 48 dirwidth = 72 45 print '---------------------------------------------'46 print 'File'.ljust(dirwidth), 'Status'47 print '---------------------------------------------'48 49 49 50 # Identify data files 51 first_time = True 50 52 all_files_accounted_for = True 51 53 for dirpath, datafile in identify_datafiles(directory): … … 66 68 except audit_exceptions, e: 67 69 all_files_accounted_for = False 68 status = 'LICENSE FILE NOT VALID' 69 status += 'REASON: %s' %e 70 71 #doc = parse(fid) 72 #pretty_print_tree(doc) 73 fid.seek(0) 74 status += fid.read() 75 76 #else: 77 # if verbose: print 'OK' 70 status = 'LICENSE FILE NOT VALID\n' 71 status += 'REASON: %s\n' %e 72 73 try: 74 doc = xml2object(fid) 75 except: 76 status += 'XML file could not be read:' 77 fid.seek(0) 78 status += fid.read() 79 else: 80 status += str(doc) 78 81 79 82 fid.close() 80 83 81 84 if status != 'OK' or verbose is True: 85 if first_time is True: 86 # Print header 87 print '---------------------------------------------' 88 print 'Files that need to be assessed for IP issuses'.ljust(dirwidth), 'Status' 89 print '---------------------------------------------' 90 first_time = False 91 82 92 print filename + ' (Checksum=%s): '\ 83 93 %str(compute_checksum(filename)), status … … 92 102 """ Identify files that might contain data 93 103 """ 94 95 # Ignore source code files96 extensions_to_ignore = ['.py','.c','.h', '.f'] #, '.gif', '.jpg', '.png']97 98 # Ignore generated stuff99 extensions_to_ignore += ['.pyc', '.o', '.so', '~']100 extensions_to_ignore += ['.aux', '.log', '.idx', 'ilg', '.ind',101 '.bbl', '.blg']102 103 # Ignore license files themselves104 extensions_to_ignore += ['.lic']105 106 107 # Ignore certain other files108 files_to_ignore = ['README.txt']109 110 # Ignore directories111 directories_to_ignore = ['anuga_work', 'pymetis', 'obsolete_code',112 'anuga_parallel', 'anuga_viewer',113 'planning', 'coding_standards',114 'experimentation',115 '.svn', 'misc', '.metadata']116 104 117 105 for dirpath, dirnames, filenames in walk(root): … … 145 133 146 134 license_filename = fid.name 147 doc = parse(fid) 148 #print_tree(doc) 149 135 136 doc = xml2object(fid) 137 #print doc 138 139 150 140 # Check that file is valid (e.g. 
all elements there) 151 # FIXME (Ole): Todo 152 153 154 if doc.nodeName != '#document': 155 msg = 'License file %s does not appear' %license_filename 156 msg += 'to be a valid XML document' 157 msg += 'The root node has name %s' %doc.nodeName 158 msg += 'but it should be %s' %'#document' 159 raise Invalid, msg 160 161 if len(doc.childNodes) != 1: 162 msg = 'License file %s must have only one element' %license_filename 163 msg += ' at the root level. It is\n ' 164 msg += '<ga_license_file>' 165 raise Invalid, msg 166 167 168 # Start looking at document in earnest 169 root_node = doc.childNodes[0] 170 if root_node.nodeName != 'ga_license_file': 141 if not doc.has_key('ga_license_file'): 171 142 msg = 'License file %s must have two elements' %license_filename 172 msg += ' at the root level. They are\n 173 msg += ' <?xml version="1.0" encoding="iso-8859-1"?>\n'174 msg += ' <ga_license_file>\n'175 msg += 'The second element was found to be %s' % root_node.nodeName143 msg += ' at the root level. They are\n' 144 msg += ' <?xml version="1.0" encoding="iso-8859-1"?>\n' 145 msg += ' <ga_license_file>\n' 146 msg += 'The second element was found to be %s' %doc.keys() 176 147 raise WrongTags, msg 177 148 178 149 179 150 # Validate elements: metadata, datafile, datafile, ... 180 elements = get_elements(root_node.childNodes)181 if elements[0].nodeName != 'metadata':182 msg = 'T he first element under %s must be"metadata"'\183 % root_node.nodeName151 elements = doc['ga_license_file'] 152 if not elements.has_key('metadata'): 153 msg = 'Tag %s must have the element "metadata"'\ 154 %doc.keys()[0] 184 155 msg += 'The element found was %s' %elements[0].nodeName 185 156 raise WrongTags, msg 186 157 187 for node in elements[1:]: 188 if node.nodeName != 'datafile': 189 msg = 'All elements, except the first, under %s must '\ 190 %root_node.nodeName 191 msg += 'be "datafile"' 192 msg += 'The element found was %s' %node.nodeName 193 raise WrongTags, msg 194 195 if verbose: print 196 # Extract information for source section 197 for node in get_elements(elements[0].childNodes): 198 if node.nodeName == 'author': 199 # Do something 200 if verbose: print 'Author: ', get_text(node.childNodes) 201 202 if node.nodeName == 'svn_keywords': 203 # Do nothing 204 pass 158 if not elements.has_key('datafile'): 159 msg = 'Tag %s must have the element "datafile"'\ 160 %doc.keys()[0] 161 msg += 'The element found was %s' %elements[0].nodeName 162 raise WrongTags, msg 163 164 for key in elements.keys(): 165 msg = 'Invalid tag: %s' %key 166 if not key in ['metadata', 'datafile']: 167 raise WrongTags, msg 168 169 170 # Extract information for metadata section 171 if verbose: print 172 metadata = elements['metadata'] 173 174 author = metadata['author'] 175 if verbose: print 'Author: ', author 176 177 #svn_keywords = metadata['svn_keywords'] 178 #if verbose: print 'SVN keywords: ', svn_keywords 179 205 180 206 181 # Extract information for datafile sections 207 for datanode in elements[1:]: 182 datafile = elements['datafile'] 183 if isinstance(datafile, XML_element): 184 datafile = [datafile] 185 186 for data in datafile: 208 187 if verbose: print 209 210 for node in get_elements(datanode.childNodes): 211 #print 'Node', node.nodeName, node.childNodes 212 #continue 213 214 if node.nodeName == 'filename': 215 # FIXME Check correctness 216 filename = join(dirpath, get_text(node.childNodes)) 217 if verbose: print 'Filename: "%s"' %filename 218 try: 219 fid = open(filename, 'r') 220 except: 221 msg = 'Specified filename %s could not be 
opened'\ 222 %filename 223 raise FilenameMismatch, msg 224 225 if node.nodeName == 'checksum': 226 # FIXME (Ole): This relies on crc being preceded by filename 227 reported_crc = get_text(node.childNodes) 228 if verbose: print 'Checksum: "%s"' %reported_crc 229 230 file_crc = str(compute_checksum(filename)) 231 232 if reported_crc != file_crc: 233 msg = 'Bad checksum (CRC).\n' 234 msg += ' The CRC reported in license file "%s" is "%s"\n'\ 235 %(license_filename, reported_crc) 236 msg += ' The CRC computed from file "%s" is "%s"'\ 237 %(filename, file_crc) 238 raise CRCMismatch, msg 188 189 # Filename 190 if data['filename'] == '': 191 msg = 'Missing filename' 192 raise FilenameMismatch, msg 193 else: 194 filename = join(dirpath, data['filename']) 195 if verbose: print 'Filename: "%s"' %filename 196 try: 197 fid = open(filename, 'r') 198 except: 199 msg = 'Specified filename %s could not be opened'\ 200 %filename 201 raise FilenameMismatch, msg 202 203 # CRC 204 reported_crc = data['checksum'] 205 if verbose: print 'Checksum: "%s"' %reported_crc 206 207 file_crc = str(compute_checksum(filename)) 208 if reported_crc != file_crc: 209 msg = 'Bad checksum (CRC).\n' 210 msg += ' The CRC reported in license file "%s" is "%s"\n'\ 211 %(license_filename, reported_crc) 212 msg += ' The CRC computed from file "%s" is "%s"'\ 213 %(filename, file_crc) 214 raise CRCMismatch, msg 239 215 240 241 if node.nodeName == 'accountable': 242 accountable = get_text(node.childNodes) 243 if verbose: print 'Accountable: "%s"' %accountable 244 if accountable == "": 245 msg = 'No accountable person specified' 246 raise Exception, msg 247 248 if node.nodeName == 'source': 249 source = get_text(node.childNodes) 250 if verbose: print 'Source: "%s"' %source 251 if source == "": 252 msg = 'No source specified' 253 raise Exception, msg 254 255 if node.nodeName == 'IP_owner': 256 ip_owner = get_text(node.childNodes) 257 if verbose: print 'IP owner: "%s"' %ip_owner 258 if ip_owner == "": 259 msg = 'No IP owner specified' 260 raise Exception, msg 216 # Accountable 217 accountable = data['accountable'] 218 if verbose: print 'Accountable: "%s"' %accountable 219 if accountable == '': 220 msg = 'No accountable person specified' 221 raise Exception, msg 222 223 # Source 224 source = data['source'] 225 if verbose: print 'Source: "%s"' %source 226 if source == '': 227 msg = 'No source specified' 228 raise Exception, msg 229 230 # IP owner 231 ip_owner = data['IP_owner'] 232 if verbose: print 'IP owner: "%s"' %ip_owner 233 if ip_owner == '': 234 msg = 'No IP owner specified' 235 raise Exception, msg 261 236 262 263 if node.nodeName == 'IP_info': 264 if verbose: print 'IP info: "%s"' %get_text(node.childNodes) 265 266 267 if node.nodeName == 'publishable': 268 269 if verbose: print 'Publishable: %s' %fid.name 270 value = get_text(node.childNodes) 271 if value.upper() != 'YES': 272 msg = 'Data file %s is not flagged as publishable'\ 273 %fid.name 274 raise NotPublishable, msg 237 # IP info 238 ip_info = data['IP_info'] 239 if verbose: print 'IP info: "%s"' %ip_info 240 if ip_info == '': 241 msg = 'No IP info specified' 242 raise Exception, msg 243 244 # Publishable 245 publishable = data['publishable'].upper() 246 if verbose: print 'Publishable: "%s"' %publishable 247 if publishable != 'YES': 248 msg = 'Data file %s is not flagged as publishable'\ 249 %fid.name 250 raise NotPublishable, msg 275 251 276 252 -
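The reworked data_audit.py now drives the audit through xml2object instead of raw minidom nodes. A minimal usage sketch of the public entry point follows; it assumes IP_verified returns the all_files_accounted_for flag computed above (the return statement lies outside the hunks shown) and the directory argument is only an illustrative path:

    from anuga.utilities.data_audit import IP_verified

    # Walk the tree, check each data file against its .lic licence file and
    # print a report for anything unlicensed, unpublishable or failing its CRC.
    if IP_verified('anuga_core/source/anuga', verbose=True):
        print 'All data files are accounted for'
    else:
        print 'Some data files need to be assessed for IP issues'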
anuga_core/source/anuga/utilities/mainland_only.lic
(diff from r4976 to r5014)
    <metadata>
      <author>Ole Nielsen</author>
-     <svn_keywords>
-       <author>$Author$</author>
-       <date>$Date$</date>
-       <revision>$Revision$</revision>
-       <url>$URL$</url>
-       <id>$Id$</id>
-     </svn_keywords>
    </metadata>
    <datafile>
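With the svn_keywords block gone, the licence file is read through the new xml_tools API. A short sketch, assuming xml2object accepts a filename (as it does in the tests below) and that text elements compare as plain strings:

    from anuga.utilities.xml_tools import xml2object

    doc = xml2object('mainland_only.lic')
    metadata = doc['ga_license_file']['metadata']
    print metadata['author']              # Ole Nielsen
    print doc['ga_license_file'].keys()   # child tags, e.g. ['metadata', 'datafile']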
anuga_core/source/anuga/utilities/test_data_audit.py
r4971 r5014 4 4 import unittest 5 5 from Numeric import zeros, array, allclose, Float 6 from tempfile import NamedTemporaryFile6 from tempfile import mkstemp 7 7 import os 8 8 … … 16 16 pass 17 17 18 def NOtest_license_file_is_not_valid(self): 19 """Basic test using an invalid XML file 20 """ 21 22 # FIXME(OLE): Needs work to ensure that the order of 23 # problems is deterministic. Currently we check for checksum 24 # but on some systems file or publishable may come first 25 26 27 # Generate invalid example 28 29 fid = NamedTemporaryFile(mode='w', 30 suffix='.asc', 31 dir='.') 32 string = 'Example data file with textual content. AAAABBBBCCCC1234' 33 fid.write(string) 34 fid.flush() 18 def test_license_file_is_not_valid1(self): 19 """Basic test using an invalid XML file. This one 20 should fail on bad CRC checksum 21 """ 22 23 # Generate invalid checksum example 24 25 tmp_fd , tmp_name = mkstemp(suffix='.asc', dir='.') 26 fid = os.fdopen(tmp_fd, 'w') 27 28 string = 'Example data file with textual content. AAAABBBBCCCC1234' 29 fid.write(string) 30 fid.close() 35 31 36 32 # Create associated license file 37 basename, ext = os.path.splitext( fid.name)33 basename, ext = os.path.splitext(tmp_name) 38 34 license_filename = basename + '.lic' 39 35 40 #print fid.name, license_filename41 36 licfid = open(license_filename, 'w') 42 37 xml_string = """<?xml version="1.0" encoding="iso-8859-1"?> … … 54 49 </metadata> 55 50 <datafile> 56 <filename> mainland_only.csv</filename>57 <checksum>-1 661725548</checksum>58 <publishable> No</publishable>51 <filename>%s</filename> 52 <checksum>-111111</checksum> 53 <publishable>Yes</publishable> 59 54 <accountable>Jane Sexton</accountable> 60 55 <source>Unknown</source> … … 72 67 73 68 </ga_license_file> 74 """ 69 """ %tmp_name 70 75 71 licfid.write(xml_string) 76 72 licfid.close() … … 78 74 licfid = open(license_filename) 79 75 #print licfid.read() 80 76 81 77 try: 82 78 license_file_is_valid(licfid) … … 91 87 fid.close() 92 88 os.remove(license_filename) 93 94 95 def NOtest_license_file_is_valid(self): 89 os.remove(tmp_name) 90 91 92 93 94 def test_license_file_is_not_valid2(self): 95 """Basic test using an invalid XML file. This one 96 should fail on Not Publishable 97 """ 98 99 # Generate invalid checksum example 100 101 tmp_fd , tmp_name = mkstemp(suffix='.asc', dir='.') 102 fid = os.fdopen(tmp_fd, 'w') 103 104 string = 'Example data file with textual content. 
AAAABBBBCCCC1234' 105 fid.write(string) 106 fid.close() 107 108 # Create associated license file 109 basename, ext = os.path.splitext(tmp_name) 110 license_filename = basename + '.lic' 111 112 licfid = open(license_filename, 'w') 113 xml_string = """<?xml version="1.0" encoding="iso-8859-1"?> 114 115 <ga_license_file> 116 <metadata> 117 <author>Ole Nielsen</author> 118 <svn_keywords> 119 <author>$Author: ole $</author> 120 <date>$Date: 2008-01-21 18:58:15 +1100 (Mon, 21 Jan 2008) $</date> 121 <revision>$Revision$</revision> 122 <url>$URL: https://datamining.anu.edu.au/svn/ga/anuga_core/source/anuga/utilities/mainland_only.lic $</url> 123 <id>$Id: mainland_only.lic 4963 2008-01-21 07:58:15Z ole $</id> 124 </svn_keywords> 125 </metadata> 126 <datafile> 127 <filename>%s</filename> 128 <checksum>-1484449438</checksum> 129 <publishable>no</publishable> 130 <accountable>Jane Sexton</accountable> 131 <source>Unknown</source> 132 <IP_owner>Geoscience Australia</IP_owner> 133 <IP_info>This is a polygon comprising easting and northing locations</IP_info> 134 </datafile> 135 136 </ga_license_file> 137 """ %tmp_name 138 139 licfid.write(xml_string) 140 licfid.close() 141 142 licfid = open(license_filename) 143 #print licfid.read() 144 145 146 try: 147 license_file_is_valid(licfid) 148 except NotPublishable: 149 pass 150 else: 151 msg = 'Should have raised NotPublishable exception' 152 raise Exception, msg 153 154 # Clean up 155 licfid.close() 156 fid.close() 157 os.remove(license_filename) 158 os.remove(tmp_name) 159 160 161 162 def test_license_file_is_not_valid3(self): 163 """Basic test using an invalid XML file. This one 164 should fail on Filename Mismatch 165 """ 166 167 168 tmp_fd , tmp_name = mkstemp(suffix='.asc', dir='.') 169 fid = os.fdopen(tmp_fd, 'w') 170 171 string = 'Example data file with textual content. 
AAAABBBBCCCC1234' 172 fid.write(string) 173 fid.close() 174 175 # Create associated license file 176 basename, ext = os.path.splitext(tmp_name) 177 license_filename = basename + '.lic' 178 179 licfid = open(license_filename, 'w') 180 xml_string = """<?xml version="1.0" encoding="iso-8859-1"?> 181 182 <ga_license_file> 183 <metadata> 184 <author>Ole Nielsen</author> 185 <svn_keywords> 186 <author>$Author: ole $</author> 187 <date>$Date: 2008-01-21 18:58:15 +1100 (Mon, 21 Jan 2008) $</date> 188 <revision>$Revision$</revision> 189 <url>$URL:$</url> 190 <id>$Id:$</id> 191 </svn_keywords> 192 </metadata> 193 <datafile> 194 <filename>%s</filename> 195 <checksum>-1484449438</checksum> 196 <publishable>Yes</publishable> 197 <accountable>Jane Sexton</accountable> 198 <source>Unknown</source> 199 <IP_owner>Geoscience Australia</IP_owner> 200 <IP_info>This is a polygon comprising easting and northing locations</IP_info> 201 </datafile> 202 203 </ga_license_file> 204 """ %(basename + '.no_exist') 205 206 207 licfid.write(xml_string) 208 licfid.close() 209 210 licfid = open(license_filename) 211 #print licfid.read() 212 213 214 try: 215 license_file_is_valid(licfid) 216 except FilenameMismatch: 217 pass 218 else: 219 msg = 'Should have raised FilenameMismatch exception' 220 raise Exception, msg 221 222 # Clean up 223 licfid.close() 224 fid.close() 225 os.remove(license_filename) 226 os.remove(tmp_name) 227 228 229 230 231 def test_license_file_is_valid(self): 96 232 """Basic test using an valid XML file 97 233 """ 98 99 # FIXME(Ole): NOT FINISHED100 234 101 235 # Generate valid example 102 103 fid = NamedTemporaryFile(mode='w', 104 suffix='.asc', 105 dir='.') 106 string = 'Example data file with textual content. AAAABBBBCCCC1234' 107 fid.write(string) 108 fid.flush() 236 tmp_fd , tmp_name = mkstemp(suffix='.asc', dir='.') 237 fid = os.fdopen(tmp_fd, 'w') 238 239 string = 'Example data file with textual content. AAAABBBBCCCC1234' 240 fid.write(string) 241 fid.close() 109 242 110 243 # Strip leading dir (./) 111 data_filename = os.path.split( fid.name)[1]112 113 print 'Name', data_filename244 data_filename = os.path.split(tmp_name)[1] 245 246 #print 'Name', data_filename 114 247 115 248 # Create associated license file 116 basename, ext = os.path.splitext( fid.name)249 basename, ext = os.path.splitext(tmp_name) 117 250 license_filename = basename + '.lic' 118 251 … … 142 275 143 276 </ga_license_file> 144 """ %(data_filename, '000') 145 146 licfid.write(xml_string) 147 licfid.close() 148 149 licfid = open(license_filename) 150 #print licfid.read() 151 152 #print fid.name, license_filename 153 154 print os.listdir('.') 155 license_file_is_valid(licfid, verbose=True) 156 157 # Clean up 158 licfid.close() 159 fid.close() 160 os.remove(license_filename) 161 277 """ %(data_filename, '-1484449438') 278 279 licfid.write(xml_string) 280 licfid.close() 281 282 licfid = open(license_filename) 283 284 license_file_is_valid(licfid)#, verbose=True) 285 286 # Clean up 287 os.remove(license_filename) 288 os.remove(tmp_name) 289 290 291 292 293 def test_valid_license_file_with_multiple_files(self): 294 """Test of XML file with more than one datafile element. 295 """ 296 297 # Generate example files 298 tmp_fd , tmp_name = mkstemp(suffix='.asc', dir='.') 299 fid = os.fdopen(tmp_fd, 'w') 300 string = 'Example data file with textual content. 
AAAABBBBCCCC1234' 301 fid.write(string) 302 fid.close() 303 304 # Derive filenames 305 basename, ext = os.path.splitext(tmp_name) 306 data_filename1 = basename + '.asc' 307 data_filename2 = basename + '.prj' 308 license_filename = basename + '.lic' 309 #print data_filename1, data_filename2, license_filename 310 311 # Write data to second data file 312 fid = open(data_filename2, 'w') 313 string = 'Another example data file with text in it' 314 fid.write(string) 315 fid.close() 316 317 # Create license file 318 licfid = open(license_filename, 'w') 319 xml_string = """<?xml version="1.0" encoding="iso-8859-1"?> 320 321 <ga_license_file> 322 <metadata> 323 <author>Ole Nielsen</author> 324 <svn_keywords> 325 <author>$Author$</author> 326 <date>$Date$</date> 327 <revision>$Revision$</revision> 328 <url>$URL:$</url> 329 <id>$Id$</id> 330 </svn_keywords> 331 </metadata> 332 <datafile> 333 <filename>%s</filename> 334 <checksum>%s</checksum> 335 <publishable>Yes</publishable> 336 <accountable>Jane Sexton</accountable> 337 <source>Generated on the fly</source> 338 <IP_owner>Geoscience Australia</IP_owner> 339 <IP_info>This is a test</IP_info> 340 </datafile> 341 <datafile> 342 <filename>%s</filename> 343 <checksum>%s</checksum> 344 <publishable>Yes</publishable> 345 <accountable>Ole Nielsen</accountable> 346 <source>Generated on the fly</source> 347 <IP_owner>Geoscience Australia</IP_owner> 348 <IP_info>This is another test</IP_info> 349 </datafile> 350 </ga_license_file> 351 """ %(data_filename1, '-1484449438', data_filename2, '-1322430740') 352 353 licfid.write(xml_string) 354 licfid.close() 355 356 licfid = open(license_filename) 357 358 license_file_is_valid(licfid)#, verbose=True) 359 360 # Clean up 361 os.remove(license_filename) 362 os.remove(data_filename1) 363 os.remove(data_filename2) 364 365 366 162 367 163 368 -
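The tests replace NamedTemporaryFile with mkstemp, presumably because each test must close the data file, reopen it by name (to compute its checksum) and remove it explicitly; NamedTemporaryFile cannot portably be reopened by name while open and deletes itself on close. The pattern used throughout the new tests, in minimal form:

    import os
    from tempfile import mkstemp

    tmp_fd, tmp_name = mkstemp(suffix='.asc', dir='.')
    fid = os.fdopen(tmp_fd, 'w')
    fid.write('Example data file with textual content. AAAABBBBCCCC1234')
    fid.close()

    # ... open tmp_name again, checksum it, audit it ...

    os.remove(tmp_name)   # explicit cleanup; mkstemp never deletes the file itself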
anuga_core/source/anuga/utilities/test_xml_tools.py
r5009 r5014 49 49 assert doc['second element']['texts']['title 4'] == 'example text 4' 50 50 51 51 assert doc.has_key('first element') 52 52 53 53 … … 175 175 176 176 os.remove(tmp_name) 177 178 179 def test_duplicate_tags(self): 180 """Test handling of duplicate tags. 181 """ 182 183 X1 = XML_element(tag='datafile', 184 value=XML_element(tag='some_text', 185 value='hello world')) 186 187 188 X2 = XML_element(tag='second_element', 189 value=XML_element(tag='texts', 190 value='egg and spam')) 191 X3 = XML_element(tag='datafile', 192 value='42') 193 194 195 # Need to have one main element according to minidom 196 main = XML_element(tag='all', value=[X1, X2, X3]) 197 xmldoc = XML_element(value=main) 198 #print xmldoc 199 200 tmp_fd , tmp_name = mkstemp(suffix='.xml', dir='.') 201 fid = os.fdopen(tmp_fd, 'w') 202 203 fid.write(str(xmldoc)) 204 fid.close() 205 206 # Now read it back 207 xmlobject = xml2object(tmp_name, verbose=True) 208 #print xmlobject 209 210 assert str(xmldoc) == str(xmlobject) 211 212 assert xmlobject['all'].has_key('datafile') 213 214 215 assert len(xmlobject['all']['datafile']) == 2 216 #print xmlobject['all']['datafile'] 217 218 os.remove(tmp_name) 219 220 177 221 178 222 #------------------------------------------------------------- -
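A side effect of the new __getitem__ in xml_tools.py (next section) is that an unmatched tag returns None rather than raising, so has_key is the natural membership test, while a duplicated tag comes back as a list. A small sketch reusing the xmlobject built in the test above ('no_such_tag' is just an illustrative missing key):

    if xmlobject['all'].has_key('datafile'):
        datafiles = xmlobject['all']['datafile']    # a list: the tag appears twice
        assert len(datafiles) == 2

    assert xmlobject['all']['no_such_tag'] is None  # unmatched keys yield None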
anuga_core/source/anuga/utilities/xml_tools.py
r5009 r5014 64 64 65 65 66 def remove_whitespace(s): 67 """Remove excess whitespace including newlines from string 68 """ 69 import string 70 words = s.split() # Split on whitespace 71 72 return string.join(words) 73 74 #return s.replace('\n', '') 75 #s.translate(string.maketrans) 76 66 77 67 78 … … 130 141 s += '<%s>' %self.tag 131 142 if isinstance(self.value, basestring): 132 s += self.value143 s += remove_whitespace(self.value) 133 144 else: 134 145 s += '\n' … … 148 159 This will allow statements such as 149 160 150 assert xmlobject['datafile']['accountable'] == 'Jane Sexton' 151 """ 152 161 assert xmlobject['datafile']['accountable'] == 'Jane Sexton' 162 163 If more than one element matches the given key a list of all 164 matches will be returned 165 """ 166 167 result = [] 153 168 for node in self.value: 154 169 if node.tag == key: 155 170 if isinstance(node.value, basestring): 156 return node.value 171 result.append(str(node.value)) 172 #return node.value 157 173 else: 158 return node 174 result.append(node) 175 #return node 176 177 if len(result) == 0: 178 return None 179 if len(result) == 1: 180 return result[0] 181 if len(result) > 1: 182 return result 183 184 185 def has_key(self, key): 186 found = False 187 for node in self.value: 188 if node.tag == key: 189 found = True 190 191 return found 192 159 193 160 194 def keys(self): … … 202 236 fid = xml 203 237 204 #print fid.read()205 238 dom = parse(fid) 206 239
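The new remove_whitespace helper normalises runs of whitespace (including newlines) to single spaces before text values are rendered by __str__, which is what keeps str(xmldoc) == str(xmlobject) in the round-trip test above. A quick sketch of its behaviour as implemented here:

    from anuga.utilities.xml_tools import remove_whitespace

    s = 'Example data file\n        with textual content.'
    assert remove_whitespace(s) == 'Example data file with textual content.'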