use proper print() and except syntax to support python 3 #59

Merged (4 commits) on Aug 23, 2015
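
In short, the changes swap Python 2-only statements for spellings that both Python 2 and Python 3 accept. A minimal sketch of the two idioms (illustrative only, not lifted from the diff):

    # Python 2-only forms removed by this PR:
    #     print "message"
    #     except ValueError, err:
    # Portable forms used instead: a single-argument print() call and
    # "except ... as ...", both valid on Python 2.6+ and Python 3.
    print("message")
    try:
        int("not a number")
    except ValueError as err:
        print("caught: %s" % err)
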
2 changes: 1 addition & 1 deletion pyresttest/ext/validator_jsonschema.py
@@ -29,7 +29,7 @@ def validate(self, body=None, headers=None, context=None):
# TODO try draft3/draft4 iter_errors - https://python-jsonschema.readthedocs.org/en/latest/validate/#jsonschema.IValidator.iter_errors
jsonschema.validate(json.loads(body), schema)
return True
-except jsonschema.exceptions.ValidationError, ve:
+except jsonschema.exceptions.ValidationError as ve:
trace = traceback.format_exc()
return validators.Failure(message="JSON Schema Validation Failed", details=trace, validator=self, failure_type=validators.FAILURE_VALIDATOR_EXCEPTION)

12 changes: 6 additions & 6 deletions pyresttest/functionaltest.py
@@ -66,10 +66,10 @@ def test_get_validators(self):

test_response = resttest.run_test(test)
for failure in test_response.failures:
print "REAL FAILURE"
print "Test Failure, failure type: {0}, Reason: {1}".format(failure.failure_type, failure.message)
print("REAL FAILURE")
print("Test Failure, failure type: {0}, Reason: {1}".format(failure.failure_type, failure.message))
if failure.details:
print "Validator/Error details: "+str(failure.details)
print("Validator/Error details: "+str(failure.details))
self.assertFalse(test_response.failures)
self.assertTrue(test_response.passed)

@@ -153,7 +153,7 @@ def test_post(self):
test_response2 = resttest.run_test(test2)
self.assertTrue(test_response2.passed)
obj = json.loads(str(test_response2.body))
-print json.dumps(obj)
+print(json.dumps(obj))

def test_delete(self):
""" Try removing an item """
@@ -184,7 +184,7 @@ def test_full_context_use(self):

# Get absolute path to test file, in the same folder as this test
path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'content-test.yaml')
-print path
+print(path)
tests = resttest.parse_testsets('http://localhost:8000', resttest.read_test_file(path), working_directory = os.path.dirname(os.path.realpath(__file__)))
failures = resttest.run_testsets(tests)
self.assertTrue(failures == 0, 'Simple tests failed where success expected')
@@ -195,7 +195,7 @@ def test_benchmark_get(self):
benchmark_config.url = self.prefix + '/api/person/'
benchmark_config.add_metric('total_time').add_metric('total_time','median')
benchmark_result = resttest.run_benchmark(benchmark_config)
print "Benchmark - median request time: " + str(benchmark_result.aggregates[0])
print("Benchmark - median request time: " + str(benchmark_result.aggregates[0]))
self.assertTrue(benchmark_config.benchmark_runs, len(benchmark_result.results['total_time']))

if __name__ == "__main__":
50 changes: 27 additions & 23 deletions pyresttest/resttest.py
@@ -9,13 +9,16 @@
import csv
import logging
from optparse import OptionParser
-from mimetools import Message # For headers handling
+from email import message_from_string # For headers handling
import time

try:
from cStringIO import StringIO
except:
-from StringIO import StringIO
+try:
+from StringIO import StringIO
+except ImportError:
+from io import StringIO

# Pyresttest internals
from binding import Context
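
Note: the StringIO import above gains a second fallback so the module still imports on Python 3, where StringIO lives in io. The resulting tiered pattern is roughly the following (a sketch of the idiom; the diff's outer clause is a bare except):

    try:
        from cStringIO import StringIO      # Python 2, C-accelerated
    except ImportError:
        try:
            from StringIO import StringIO   # Python 2, pure-Python fallback
        except ImportError:
            from io import StringIO         # Python 3
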
@@ -146,7 +149,7 @@ def parse_headers(header_string):
if not headers:
return dict()
else:
-header_msg = Message(StringIO(headers))
+header_msg = message_from_string(headers)
return dict(header_msg.items())

def parse_testsets(base_url, test_structure, test_files = set(), working_directory = None, vars=None):
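
Note: mimetools was removed in Python 3, so parse_headers now hands the raw header block to email.message_from_string instead of wrapping it in mimetools.Message. A small usage sketch (the header string below is invented for illustration):

    from email import message_from_string

    raw_headers = "Content-Type: application/json\r\nServer: nginx\r\n\r\n"
    header_msg = message_from_string(raw_headers)
    print(dict(header_msg.items()))
    # {'Content-Type': 'application/json', 'Server': 'nginx'}
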
@@ -275,24 +278,24 @@ def run_test(mytest, test_config = TestConfig(), context = None):
result.passed = None

if test_config.interactive:
print "==================================="
print "%s" % mytest.name
print "-----------------------------------"
print "REQUEST:"
print "%s %s" % (templated_test.method, templated_test.url)
print "HEADERS:"
print "%s" % (templated_test.headers)
print("===================================")
print("%s" % mytest.name)
print("-----------------------------------")
print("REQUEST:")
print("%s %s" % (templated_test.method, templated_test.url))
print("HEADERS:")
print("%s" % (templated_test.headers))
if mytest.body is not None:
print "\n%s" % templated_test.body
print("\n%s" % templated_test.body)
raw_input("Press ENTER when ready (%d): " % (mytest.delay))

if mytest.delay > 0:
print "Delaying for %ds" % mytest.delay
print("Delaying for %ds" % mytest.delay)
time.sleep(mytest.delay)

try:
curl.perform() # Run the actual call
-except Exception, e:
+except Exception as e:
# Curl exception occurred (network error), do not pass go, do not collect $200
trace = traceback.format_exc()
result.failures.append(Failure(message="Curl Exception: {0}".format(e), details=trace, failure_type=validators.FAILURE_CURL_EXCEPTION))
@@ -322,8 +325,9 @@ def run_test(mytest, test_config = TestConfig(), context = None):
# Parse HTTP headers
try:
result.response_headers = parse_headers(result.response_headers)
-except Exception, e:
-result.failures.append(Failure(message="Header parsing exception: {0}".format(e), details=trace, failure_type=validators.TEST_EXCEPTION))
+except Exception as e:
+trace = traceback.format_exc()
+result.failures.append(Failure(message="Header parsing exception: {0}".format(e), details=trace, failure_type=validators.FAILURE_TEST_EXCEPTION))
result.passed = False
curl.close()
return result
@@ -355,8 +359,8 @@ def run_test(mytest, test_config = TestConfig(), context = None):
# Print response body if override is set to print all *OR* if test failed (to capture maybe a stack trace)
if test_config.print_bodies or not result.passed:
if test_config.interactive:
print "RESPONSE:"
print result.body.decode("string-escape")
print("RESPONSE:")
print(result.body.decode("string-escape"))

# TODO add string escape on body output
logger.debug(result)
@@ -588,7 +592,7 @@ def run_testsets(testsets):

# handle stop_on_failure flag
if not result.passed and test.stop_on_failure is not None and test.stop_on_failure:
-print 'STOP ON FAILURE! stopping test set execution, continuing with other test sets'
+print('STOP ON FAILURE! stopping test set execution, continuing with other test sets')
break

for benchmark in mybenchmarks: # Run benchmarks, analyze, write
@@ -598,7 +602,7 @@ def run_testsets(testsets):

logger.info("Benchmark Starting: "+benchmark.name+" Group: "+benchmark.group)
benchmark_result = run_benchmark(benchmark, myconfig, context=context)
-print benchmark_result
+print(benchmark_result)
logger.info("Benchmark Done: "+benchmark.name+" Group: "+benchmark.group)

if benchmark.output_file: # Write file

if myinteractive:
# a break for when interactive bits are complete, before summary data
print "==================================="
print("===================================")

# Print summary results
for group in sorted(group_results.keys()):
test_count = len(group_results[group])
failures = group_failure_counts[group]
total_failures = total_failures + failures
if (failures > 0):
-print u'Test Group '+group+u' FAILED: '+ str((test_count-failures))+'/'+str(test_count) + u' Tests Passed!'
+print(u'Test Group '+group+u' FAILED: '+ str((test_count-failures))+'/'+str(test_count) + u' Tests Passed!')
else:
-print u'Test Group '+group+u' SUCCEEDED: '+ str((test_count-failures))+'/'+str(test_count) + u' Tests Passed!'
+print(u'Test Group '+group+u' SUCCEEDED: '+ str((test_count-failures))+'/'+str(test_count) + u' Tests Passed!')

return total_failures

@@ -661,7 +665,7 @@ def register_extensions(modules):
try:
import jsonschema
register_extensions('ext.validator_jsonschema')
-except ImportError, ie:
+except ImportError as ie:
logging.warn("Failed to load jsonschema validator, make sure the jsonschema module is installed if you wish to use schema validators.")

def main(args):
6 changes: 3 additions & 3 deletions pyresttest/test_generators.py
@@ -59,7 +59,7 @@ def test_basic_ids(self):
def test_random_ids(self):
""" Test random in ids generator """
gen = generators.generator_random_int32()
-print gen.next()
+print(gen.next())
self.generator_repeat_test(gen)

def test_system_variables(self):
@@ -172,8 +172,8 @@ def test_parse_text_generator(self):
for x in xrange(0, 50):
val = gen.next()
self.assertTrue(set(val).issubset(set(myset)))
-except Exception, e:
-print 'Exception occurred with charset: '+charset
+except Exception as e:
+print('Exception occurred with charset: '+charset)
raise e

my_min = 1
6 changes: 3 additions & 3 deletions pyresttest/test_mini_framework_benchmarks.py
@@ -4,21 +4,21 @@

# Test basic pycurl create/delete, time is ~2.5 microseconds
time = timeit.timeit("mycurl=Curl(); mycurl.close()", setup="from pycurl import Curl", number=1000000)
-print 'Curl create/destroy runtime for 1M runs (s)'+str(time)
+print('Curl create/destroy runtime for 1M runs (s)'+str(time))

# Test test interpret/build & configuration speeds for resttest
# Runtime is 36.29 sec, so 36 microseconds per run, or 0.036 ms
time = timeit.timeit("mytest=Test.parse_test('', input); mycurl=mytest.configure_curl(); mycurl.close()",
setup='from resttest import Test; input = {"url": "/ping", "method": "DELETE", "NAME":"foo", "group":"bar", "body":"<xml>input</xml>","headers":{"Accept":"Application/json"}}',
number=1000000)
-print 'Test interpret/configure test config for 1M runs (s)'+str(time)
+print('Test interpret/configure test config for 1M runs (s)'+str(time))

# Just configuring the curl object from a pre-built test
# 10s/1M runs, or 0.01 ms per
time = timeit.timeit("mycurl=mytest.configure_curl(); mycurl.close()",
setup='from resttest import Test; input = {"url": "/ping", "method": "DELETE", "NAME":"foo", "group":"bar", "body":"<xml>input</xml>","headers":{"Accept":"Application/json"}}; mytest=Test.parse_test("", input);',
number=1000000)
-print 'Test configure curl for 1M runs (s)'+str(time)
+print('Test configure curl for 1M runs (s)'+str(time))

# Time for full curl execution on Django testing rest app
# Time: 41.4s for 10k runs, or about 4.14 ms per
2 changes: 1 addition & 1 deletion pyresttest/test_validators.py
@@ -319,7 +319,7 @@ def test_validator_error_responses(self):
self.assertEqual(failure.failure_type, validators.FAILURE_VALIDATOR_FAILED)
expected_details = 'Extractor: Extractor Type: jsonpath_mini, Query: "key.val", Templated?: False'
self.assertEqual(expected_details, failure.details)
print "Failure config: "+str(failure.details)
print("Failure config: "+str(failure.details))
self.assertEqual(comp, failure.validator)

failure = comp.validate(body='{"id": 3, "key": {"val": 4}')
2 changes: 1 addition & 1 deletion pyresttest/tests.py
@@ -181,7 +181,7 @@ def update_context_after(self, response_body, context):
if self.extract_binds:
for key, value in self.extract_binds.items():
result = value.extract(body=response_body, context=context)
-print 'Result: {0}'.format(result)
+print('Result: {0}'.format(result))
context.bind_variable(key, result)

