(#1597) summarize warnings at end of test invocations
drewbanin committed Aug 3, 2019
1 parent 4478a89 commit 800355e
Showing 7 changed files with 96 additions and 36 deletions.
18 changes: 9 additions & 9 deletions core/dbt/compilation.py
@@ -25,21 +25,21 @@

def print_compile_stats(stats):
names = {
-NodeType.Model: 'models',
-NodeType.Test: 'tests',
-NodeType.Snapshot: 'snapshots',
-NodeType.Analysis: 'analyses',
-NodeType.Macro: 'macros',
-NodeType.Operation: 'operations',
-NodeType.Seed: 'seed files',
-NodeType.Source: 'sources',
+NodeType.Model: 'model',
+NodeType.Test: 'test',
+NodeType.Snapshot: 'snapshot',
+NodeType.Analysis: 'analyse',
+NodeType.Macro: 'macro',
+NodeType.Operation: 'operation',
+NodeType.Seed: 'seed file',
+NodeType.Source: 'source',
}

results = {k: 0 for k in names.keys()}
results.update(stats)

stat_line = ", ".join(
["{} {}".format(ct, names.get(t)) for t, ct in results.items()])
[dbt.utils.pluralize(ct, names.get(t)) for t, ct in results.items()])

logger.notice("Found {}".format(stat_line))

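For a sense of what this change does to the "Found ..." line, here is a small standalone sketch (not dbt's actual wiring: plain string keys stand in for the NodeType members, and pluralize is copied from the helper this commit adds to core/dbt/utils.py):

def pluralize(count, string):
    if count == 1:
        return "{} {}".format(count, string)
    else:
        return "{} {}s".format(count, string)

# Singular labels, as in the updated names dict above.
names = {'model': 'model', 'test': 'test', 'seed': 'seed file', 'source': 'source'}
stats = {'model': 3, 'test': 1, 'seed': 0, 'source': 2}

stat_line = ", ".join(
    [pluralize(ct, names.get(t)) for t, ct in stats.items()])
print("Found {}".format(stat_line))
# prints: Found 3 models, 1 test, 0 seed files, 2 sources

Counts of exactly 1 no longer pick up a stray "s", which is the point of swapping the hard-coded plural labels for pluralize.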
16 changes: 12 additions & 4 deletions core/dbt/contracts/results.py
@@ -130,22 +130,24 @@ def failed(self):
'type': 'boolean',
'description': 'True if this node was skipped',
},
-# This is assigned by dbt.ui.printer.print_test_result_line, if a test
-# has no error and a non-zero status
+'warn': {
+'type': ['boolean', 'null'],
+'description': 'True if this node succeeded with a warning',
+},
'fail': {
'type': ['boolean', 'null'],
'description': 'On tests, true if the test failed',
},
},
-'required': ['skip', 'fail']
+'required': ['skip', 'fail', 'warn']
})


class RunModelResult(NodeSerializable):
SCHEMA = RUN_MODEL_RESULT_CONTRACT

def __init__(self, node, error=None, skip=False, status=None, failed=None,
-thread_id=None, timing=None, execution_time=0):
+warned=None, thread_id=None, timing=None, execution_time=0):
if timing is None:
timing = []
super(RunModelResult, self).__init__(
@@ -154,6 +156,7 @@ def __init__(self, node, error=None, skip=False, status=None, failed=None,
skip=skip,
status=status,
fail=failed,
+warn=warned,
execution_time=execution_time,
thread_id=thread_id,
timing=timing,
@@ -163,6 +166,7 @@ def __init__(self, node, error=None, skip=False, status=None, failed=None,
error = named_property('error',
'If there was an error, the text of that error')
skip = named_property('skip', 'True if the model was skipped')
+warn = named_property('warn', 'True if this was a test and it warned')
fail = named_property('fail', 'True if this was a test and it failed')
status = named_property('status', 'The status of the model execution')
execution_time = named_property('execution_time',
@@ -180,6 +184,10 @@ def __init__(self, node, error=None, skip=False, status=None, failed=None,
def failed(self):
return self.fail

+@property
+def warned(self):
+return self.warn

@property
def skipped(self):
return self.skip
16 changes: 13 additions & 3 deletions core/dbt/node_runners.py
@@ -11,6 +11,7 @@
import dbt.utils
import dbt.tracking
import dbt.ui.printer
+import dbt.flags
from dbt import rpc

import threading
@@ -77,7 +78,7 @@ def run_with_hooks(self, manifest):
return result

def _build_run_result(self, node, start_time, error, status, timing_info,
-skip=False, failed=None):
+skip=False, failed=None, warn=None):
execution_time = time.time() - start_time
thread_id = threading.current_thread().name
timing = [t.serialize() for t in timing_info]
@@ -87,6 +88,7 @@ def _build_run_result(self, node, start_time, error, status, timing_info,
skip=skip,
status=status,
failed=failed,
+warned=warn,
execution_time=execution_time,
thread_id=thread_id,
timing=timing
@@ -118,6 +120,7 @@ def from_run_result(self, result, start_time, timing_info):
skip=result.skip,
status=result.status,
failed=result.failed,
+warn=result.warned,
timing_info=timing_info
)

@@ -478,8 +481,15 @@ def before_execute(self):
self.print_start_line()

def execute(self, test, manifest):
-status = self.execute_test(test)
-return RunModelResult(test, status=status)
+failed_rows = self.execute_test(test)
+severity = test.config['severity'].upper()

+if failed_rows == 0:
+return RunModelResult(test, status=failed_rows)
+elif severity == 'ERROR' or dbt.flags.WARN_ERROR:
+return RunModelResult(test, status=failed_rows, failed=True)
+else:
+return RunModelResult(test, status=failed_rows, warned=True)

def after_execute(self, result):
self.print_result_line(result)
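The execute() change above is where a failing test now splits into "failed" versus "warned". A minimal standalone sketch of that decision, under stated assumptions: classify_test_result is a hypothetical helper, warn_error stands in for the dbt.flags.WARN_ERROR / --warn-error flag, and a plain dict stands in for RunModelResult.

def classify_test_result(failed_rows, severity, warn_error=False):
    # Mirrors the warn/fail branching added to the test runner's execute() (sketch only).
    if failed_rows == 0:
        return {'status': failed_rows, 'failed': None, 'warned': None}
    elif severity.upper() == 'ERROR' or warn_error:
        return {'status': failed_rows, 'failed': True, 'warned': None}
    else:
        return {'status': failed_rows, 'failed': None, 'warned': True}

print(classify_test_result(0, 'warn'))                   # clean pass
print(classify_test_result(2, 'warn'))                   # 2 failing rows at severity warn -> warned
print(classify_test_result(2, 'warn', warn_error=True))  # --warn-error promotes the warning to a failure
print(classify_test_result(2, 'error'))                  # severity error -> failed

This is what the integration tests at the bottom of the commit assert on: the same non-zero status produces warn or fail depending on severity and flags.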
58 changes: 38 additions & 20 deletions core/dbt/ui/printer.py
@@ -100,7 +100,7 @@ def get_counts(flat_nodes):
counts[t] = counts.get(t, 0) + 1

stat_line = ", ".join(
["{} {}s".format(v, k) for k, v in counts.items()])
[dbt.utils.pluralize(v, k) for k, v in counts.items()])

return stat_line

@@ -145,25 +145,23 @@ def get_printable_result(result, success, error):

def print_test_result_line(result, schema_name, index, total):
model = result.node
-info = 'PASS'

if result.error is not None:
info = "ERROR"
color = red

-elif result.status > 0:
-severity = result.node.config['severity'].upper()
-if severity == 'ERROR' or dbt.flags.WARN_ERROR:
-info = 'FAIL {}'.format(result.status)
-color = red
-result.fail = True
-else:
-info = 'WARN {}'.format(result.status)
-color = yellow
elif result.status == 0:
+info = 'PASS'
color = green

+elif result.warn:
+info = 'WARN {}'.format(result.status)
+color = yellow

+elif result.fail:
+info = 'FAIL {}'.format(result.status)
+color = red

else:
raise RuntimeError("unexpected status: {}".format(result.status))

@@ -259,6 +257,8 @@ def interpret_run_result(result):
return 'error'
elif result.skipped:
return 'skip'
+elif result.warned:
+return 'warn'
else:
return 'pass'

@@ -268,6 +268,7 @@ def print_run_status_line(results):
'error': 0,
'skip': 0,
'pass': 0,
+'warn': 0,
'total': 0,
}

@@ -276,20 +277,28 @@
stats[result_type] += 1
stats['total'] += 1

stats_line = "\nDone. PASS={pass} ERROR={error} SKIP={skip} TOTAL={total}"
stats_line = "\nDone. PASS={pass} WARN={warn} ERROR={error} SKIP={skip} TOTAL={total}" # noqa
logger.info(stats_line.format(**stats))


-def print_run_result_error(result, newline=True):
+def print_run_result_error(result, newline=True, is_warning=False):
if newline:
logger.info("")

-if result.failed:
-logger.info(yellow("Failure in {} {} ({})").format(
+if result.failed or (is_warning and result.warned):
+if is_warning:
+color = yellow
+info = 'Warning'
+else:
+color = red
+info = 'Failure'
+logger.info(color("{} in {} {} ({})").format(
+info,
result.node.get('resource_type'),
result.node.get('name'),
result.node.get('original_file_path')))
logger.info(" Got {} results, expected 0.".format(result.status))
status = dbt.utils.pluralize(result.status, 'result')
logger.info(" Got {}, expected 0.".format(status))

if result.node.get('build_path') is not None:
logger.info("")
@@ -314,11 +323,16 @@ def print_skip_caused_by_error(model, schema, relation, index, num_models,
print_run_result_error(result, newline=False)


-def print_end_of_run_summary(num_errors, early_exit=False):
+def print_end_of_run_summary(num_errors, num_warnings, early_exit=False):
+error_plural = dbt.utils.pluralize(num_errors, 'error')
+warn_plural = dbt.utils.pluralize(num_warnings, 'warning')
if early_exit:
message = yellow('Exited because of keyboard interrupt.')
elif num_errors > 0:
-message = red('Completed with {} errors:'.format(num_errors))
+message = red("Completed with {} and {}:".format(
+error_plural, warn_plural))
+elif num_warnings > 0:
+message = yellow('Completed with {}:'.format(warn_plural))
else:
message = green('Completed successfully')

@@ -328,9 +342,13 @@

def print_run_end_messages(results, early_exit=False):
errors = [r for r in results if r.error is not None or r.failed]
-print_end_of_run_summary(len(errors), early_exit)
+warnings = [r for r in results if r.warned]
+print_end_of_run_summary(len(errors), len(warnings), early_exit)

for error in errors:
-print_run_result_error(error)
+print_run_result_error(error, is_warning=False)

+for warning in warnings:
+print_run_result_error(warning, is_warning=True)

print_run_status_line(results)
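To see how the new pieces in this file fit together end to end, here is a self-contained sketch that classifies a few fake results the way interpret_run_result does and prints the widened status line. Assumptions: namedtuples stand in for dbt result objects, colors are omitted, and the error branch's exact condition is not visible in the hunk above, so treating failed results as errors is an assumption.

from collections import namedtuple

# Only the fields the summary code reads.
FakeResult = namedtuple('FakeResult', ['error', 'skipped', 'warned', 'failed'])

def interpret(result):
    # Same ordering as interpret_run_result: error, skip, warn, else pass.
    if result.error is not None or result.failed:  # assumed condition (not shown in the diff)
        return 'error'
    elif result.skipped:
        return 'skip'
    elif result.warned:
        return 'warn'
    else:
        return 'pass'

results = [
    FakeResult(error=None, skipped=False, warned=False, failed=False),  # pass
    FakeResult(error=None, skipped=False, warned=True, failed=False),   # warn
    FakeResult(error=None, skipped=False, warned=False, failed=True),   # failed test
    FakeResult(error=None, skipped=True, warned=False, failed=False),   # skip
]

stats = {'error': 0, 'skip': 0, 'pass': 0, 'warn': 0, 'total': 0}
for r in results:
    stats[interpret(r)] += 1
    stats['total'] += 1

stats_line = "\nDone. PASS={pass} WARN={warn} ERROR={error} SKIP={skip} TOTAL={total}"
print(stats_line.format(**stats))
# prints: Done. PASS=1 WARN=1 ERROR=1 SKIP=1 TOTAL=4

Warnings now get their own bucket in the Done line and their own "Warning in ..." block from print_run_result_error, instead of disappearing after the run.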
7 changes: 7 additions & 0 deletions core/dbt/utils.py
@@ -481,3 +481,10 @@ def translate_aliases(kwargs, aliases):
result[canonical_key] = value

return result


+def pluralize(count, string):
+if count == 1:
+return "{} {}".format(count, string)
+else:
+return "{} {}s".format(count, string)
8 changes: 8 additions & 0 deletions
@@ -1878,6 +1878,7 @@ def expected_run_results(self, quote_schema=True, quote_model=False,
'error': None,
'execution_time': AnyFloat(),
'fail': None,
+'warn': None,
'node': {
'alias': 'model',
'build_path': _normalize(
@@ -1930,6 +1931,7 @@ def expected_run_results(self, quote_schema=True, quote_model=False,
'error': None,
'execution_time': AnyFloat(),
'fail': None,
+'warn': None,
'node': {
'alias': 'seed',
'build_path': _normalize(
@@ -1980,6 +1982,7 @@ def expected_run_results(self, quote_schema=True, quote_model=False,
'error': None,
'execution_time': AnyFloat(),
'fail': None,
+'warn': None,
'node': {
'alias': 'not_null_model_id',
'build_path': _normalize('target/compiled/test/schema_test/not_null_model_id.sql'),
@@ -2030,6 +2033,7 @@ def expected_run_results(self, quote_schema=True, quote_model=False,
'error': None,
'execution_time': AnyFloat(),
'fail': None,
+'warn': None,
'node': {
'alias': 'nothing_model_',
'build_path': _normalize('target/compiled/test/schema_test/nothing_model_.sql'),
@@ -2079,6 +2083,7 @@ def expected_run_results(self, quote_schema=True, quote_model=False,
'error': None,
'execution_time': AnyFloat(),
'fail': None,
+'warn': None,
'node': {
'alias': 'unique_model_id',
'build_path': _normalize('target/compiled/test/schema_test/unique_model_id.sql'),
@@ -2156,6 +2161,7 @@ def expected_postgres_references_run_results(self):
'error': None,
'execution_time': AnyFloat(),
'fail': None,
+'warn': None,
'node': {
'alias': 'ephemeral_summary',
'build_path': _normalize(
@@ -2244,6 +2250,7 @@ def expected_postgres_references_run_results(self):
'error': None,
'execution_time': AnyFloat(),
'fail': None,
+'warn': None,
'node': {
'alias': 'view_summary',
'build_path': _normalize(
@@ -2331,6 +2338,7 @@ def expected_postgres_references_run_results(self):
'error': None,
'execution_time': AnyFloat(),
'fail': None,
+'warn': None,
'node': {
'alias': 'seed',
'build_path': _normalize(
9 changes: 9 additions & 0 deletions test/integration/045_test_severity_tests/test_severity.py
@@ -28,8 +28,10 @@ def test_postgres_severity_warnings(self):
results = self.run_dbt_with_vars(['test', '--schema'], 'false', strict=False)
self.assertEqual(len(results), 2)
self.assertFalse(results[0].fail)
+self.assertTrue(results[0].warn)
self.assertEqual(results[0].status, 2)
self.assertFalse(results[1].fail)
+self.assertTrue(results[1].warn)
self.assertEqual(results[1].status, 2)

@use_profile('postgres')
@@ -39,8 +41,10 @@ def test_postgres_severity_rendered_errors(self):
results = self.run_dbt_with_vars(['test', '--schema'], 'true', strict=False, expect_pass=False)
self.assertEqual(len(results), 2)
self.assertTrue(results[0].fail)
+self.assertFalse(results[0].warn)
self.assertEqual(results[0].status, 2)
self.assertTrue(results[1].fail)
+self.assertFalse(results[1].warn)
self.assertEqual(results[1].status, 2)

@use_profile('postgres')
@@ -50,8 +54,10 @@ def test_postgres_severity_warnings_strict(self):
results = self.run_dbt_with_vars(['test', '--schema'], 'false', expect_pass=False)
self.assertEqual(len(results), 2)
self.assertTrue(results[0].fail)
+self.assertFalse(results[0].warn)
self.assertEqual(results[0].status, 2)
self.assertTrue(results[1].fail)
+self.assertFalse(results[1].warn)
self.assertEqual(results[1].status, 2)

@use_profile('postgres')
@@ -61,6 +67,7 @@ def test_postgres_data_severity_warnings(self):
results = self.run_dbt_with_vars(['test', '--data'], 'false', strict=False)
self.assertEqual(len(results), 1)
self.assertFalse(results[0].fail)
+self.assertTrue(results[0].warn)
self.assertEqual(results[0].status, 2)

@use_profile('postgres')
@@ -70,6 +77,7 @@ def test_postgres_data_severity_rendered_errors(self):
results = self.run_dbt_with_vars(['test', '--data'], 'true', strict=False, expect_pass=False)
self.assertEqual(len(results), 1)
self.assertTrue(results[0].fail)
+self.assertFalse(results[0].warn)
self.assertEqual(results[0].status, 2)

@use_profile('postgres')
@@ -79,4 +87,5 @@ def test_postgres_data_severity_warnings_strict(self):
results = self.run_dbt_with_vars(['test', '--data'], 'false', expect_pass=False)
self.assertEqual(len(results), 1)
self.assertTrue(results[0].fail)
+self.assertFalse(results[0].warn)
self.assertEqual(results[0].status, 2)
