From 4c02b4a6c353fd4d529a000d963724f92aa3bb0d Mon Sep 17 00:00:00 2001 From: Jacob Beck Date: Wed, 1 May 2019 07:11:29 -0600 Subject: [PATCH 1/4] Make an stderr handler available as well and provide a way to swap between them --- core/dbt/logger.py | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/core/dbt/logger.py b/core/dbt/logger.py index f658769172f..3ae7418eb27 100644 --- a/core/dbt/logger.py +++ b/core/dbt/logger.py @@ -50,6 +50,11 @@ def notice(self, msg, *args, **kwargs): stdout_handler.setFormatter(logging.Formatter('%(message)s')) stdout_handler.setLevel(NOTICE) +stderr_handler = logging.StreamHandler() +stderr_handler.setFormatter(logging.Formatter('%(message)s')) +stderr_handler.setLevel(WARNING) + + logger = logging.getLogger('dbt') logger.addHandler(stdout_handler) logger.setLevel(DEBUG) @@ -79,6 +84,13 @@ def notice(self, msg, *args, **kwargs): initialized = False +def log_to_stderr(logger): + if stdout_handler in logger.handlers: + logger.handlers.remove(stdout_handler) + if stderr_handler not in logger.handlers: + logger.addHandler(stderr_handler) + + def make_log_dir_if_missing(log_dir): import dbt.clients.system dbt.clients.system.make_directory(log_dir) @@ -99,14 +111,17 @@ def default_formatter(): def initialize_logger(debug_mode=False, path=None): - global initialized, logger, stdout_handler + global initialized, logger, stdout_handler, stderr_handler if initialized: return if debug_mode: + # we'll only use one of these, but just set both up stdout_handler.setFormatter(default_formatter()) stdout_handler.setLevel(DEBUG) + stderr_handler.setFormatter(default_formatter()) + stderr_handler.setLevel(DEBUG) if path is not None: make_log_dir_if_missing(path) From e043643a54b5912cfaf06b9e1a272571454b7327 Mon Sep 17 00:00:00 2001 From: Jacob Beck Date: Wed, 1 May 2019 07:12:12 -0600 Subject: [PATCH 2/4] Add a new ListTask, and 'dbt list'/'dbt ls' All tasks now have a 'pre_init_hook' classmethod, called by main - runs 
after args are parsed, before anything else --- core/dbt/logger.py | 16 +- core/dbt/main.py | 43 ++- core/dbt/parser/base_sql.py | 7 +- core/dbt/task/base.py | 4 + core/dbt/task/list.py | 120 +++++++ .../047_dbt_ls_test/analyses/analysis.sql | 1 + .../047_dbt_ls_test/archives/archive.sql | 12 + .../integration/047_dbt_ls_test/data/seed.csv | 2 + .../047_dbt_ls_test/macros/macro_stuff.sql | 7 + .../047_dbt_ls_test/models/docs.md | 3 + .../047_dbt_ls_test/models/outer.sql | 1 + .../047_dbt_ls_test/models/schema.yml | 15 + .../047_dbt_ls_test/models/sub/inner.sql | 1 + test/integration/047_dbt_ls_test/test_ls.py | 299 ++++++++++++++++++ 14 files changed, 524 insertions(+), 7 deletions(-) create mode 100644 core/dbt/task/list.py create mode 100644 test/integration/047_dbt_ls_test/analyses/analysis.sql create mode 100644 test/integration/047_dbt_ls_test/archives/archive.sql create mode 100644 test/integration/047_dbt_ls_test/data/seed.csv create mode 100644 test/integration/047_dbt_ls_test/macros/macro_stuff.sql create mode 100644 test/integration/047_dbt_ls_test/models/docs.md create mode 100644 test/integration/047_dbt_ls_test/models/outer.sql create mode 100644 test/integration/047_dbt_ls_test/models/schema.yml create mode 100644 test/integration/047_dbt_ls_test/models/sub/inner.sql create mode 100644 test/integration/047_dbt_ls_test/test_ls.py diff --git a/core/dbt/logger.py b/core/dbt/logger.py index 3ae7418eb27..a1b3e590bad 100644 --- a/core/dbt/logger.py +++ b/core/dbt/logger.py @@ -84,11 +84,19 @@ def notice(self, msg, *args, **kwargs): initialized = False +def _swap_handler(logger, old, new): + if old in logger.handlers: + logger.handlers.remove(old) + if new not in logger.handlers: + logger.addHandler(new) + + def log_to_stderr(logger): - if stdout_handler in logger.handlers: - logger.handlers.remove(stdout_handler) - if stderr_handler not in logger.handlers: - logger.addHandler(stderr_handler) + _swap_handler(logger, stdout_handler, stderr_handler) + + +def 
log_to_stdout(logger): + _swap_handler(logger, stderr_handler, stdout_handler) def make_log_dir_if_missing(log_dir): diff --git a/core/dbt/main.py b/core/dbt/main.py index d7a869be6ef..f270ed3b21d 100644 --- a/core/dbt/main.py +++ b/core/dbt/main.py @@ -22,6 +22,7 @@ import dbt.task.serve as serve_task import dbt.task.freshness as freshness_task import dbt.task.run_operation as run_operation_task +from dbt.task.list import ListTask from dbt.task.rpc_server import RPCServerTask from dbt.adapters.factory import reset_adapters @@ -183,6 +184,7 @@ def run_from_args(parsed): log_cache_events(getattr(parsed, 'log_cache_events', False)) update_flags(parsed) + parsed.cls.pre_init_hook() logger.info("Running with dbt{}".format(dbt.version.installed)) # this will convert DbtConfigErrors into RuntimeExceptions @@ -569,6 +571,42 @@ def _build_rpc_subparser(subparsers, base_subparser): return sub +def _build_list_subparser(subparsers, base_subparser): + sub = subparsers.add_parser( + 'list', + parents=[base_subparser], + help='list models' + ) + sub.set_defaults(cls=ListTask, which='list') + resource_values = list(ListTask.ALL_RESOURCE_VALUES) + ['default', 'all'] + sub.add_argument('--resource-type', + choices=resource_values, + action='append', + default=[], + dest='resource_types') + sub.add_argument('--output', + choices=['json', 'name', 'path', 'selector'], + default='selector') + sub.add_argument( + '-s', + '--select', + required=False, + nargs='+', + help="Specify the nodes to select.", + dest='models' + ) + sub.add_argument( + '--exclude', + required=False, + nargs='+', + help="Specify the models to exclude." 
+ ) + # in python 3.x you can use the 'aliases' kwarg, but in python 2.7 you get + # to do this + subparsers._name_parser_map['ls'] = sub + return sub + + def parse_args(args): p = DBTArgumentParser( prog='dbt: data build tool', @@ -645,14 +683,15 @@ def parse_args(args): # make the subcommands that have their own subcommands docs_sub = _build_docs_subparser(subs, base_subparser) - docs_subs = docs_sub.add_subparsers() + docs_subs = docs_sub.add_subparsers(title="Available sub-commands") source_sub = _build_source_subparser(subs, base_subparser) - source_subs = source_sub.add_subparsers() + source_subs = source_sub.add_subparsers(title="Available sub-commands") _build_init_subparser(subs, base_subparser) _build_clean_subparser(subs, base_subparser) _build_debug_subparser(subs, base_subparser) _build_deps_subparser(subs, base_subparser) + _build_list_subparser(subs, base_subparser) archive_sub = _build_archive_subparser(subs, base_subparser) rpc_sub = _build_rpc_subparser(subs, base_subparser) diff --git a/core/dbt/parser/base_sql.py b/core/dbt/parser/base_sql.py index 2a576ffaaf9..e359d2c67b7 100644 --- a/core/dbt/parser/base_sql.py +++ b/core/dbt/parser/base_sql.py @@ -47,9 +47,14 @@ def load_and_parse(self, package_name, root_dir, relative_dirs, path = self.get_compiled_path(name, file_match.get('relative_path')) + # TODO(jeb): Why would the original file path rely on the compiled + # path? 
+ # original_file_path = os.path.join( + # file_match.get('searched_path'), + # path) original_file_path = os.path.join( file_match.get('searched_path'), - path) + file_match.get('relative_path')) result.append({ 'name': name, diff --git a/core/dbt/task/base.py b/core/dbt/task/base.py index 6a7a6b2be7e..3fb3e09feea 100644 --- a/core/dbt/task/base.py +++ b/core/dbt/task/base.py @@ -46,6 +46,10 @@ def __init__(self, args, config): self.args = args self.config = config + @classmethod + def pre_init_hook(cls): + """A hook called before the task is initialized.""" + @classmethod def from_args(cls, args): try: diff --git a/core/dbt/task/list.py b/core/dbt/task/list.py new file mode 100644 index 00000000000..b4095b46c0b --- /dev/null +++ b/core/dbt/task/list.py @@ -0,0 +1,120 @@ +from __future__ import print_function + +import json + +from dbt.task.runnable import GraphRunnableTask, ManifestTask +from dbt.node_types import NodeType +import dbt.exceptions +from dbt.logger import GLOBAL_LOGGER as logger +from dbt.logger import log_to_stderr + + +class ListTask(GraphRunnableTask): + DEFAULT_RESOURCE_VALUES = frozenset(( + NodeType.Model, + NodeType.Archive, + NodeType.Seed, + NodeType.Test, + NodeType.Source, + )) + ALL_RESOURCE_VALUES = DEFAULT_RESOURCE_VALUES | frozenset(( + NodeType.Analysis, + )) + ALLOWED_KEYS = frozenset(( + 'alias', + 'name', + 'package_name', + 'depends_on', + 'tags', + 'config', + 'resource_type', + 'source_name', + )) + + def __init__(self, args, config): + super(ListTask, self).__init__(args, config) + self.config.args.single_threaded = True + + @classmethod + def pre_init_hook(cls): + """A hook called before the task is initialized.""" + log_to_stderr(logger) + + def _iterate_selected_nodes(self): + nodes = sorted(self.select_nodes()) + if not nodes: + logger.warning('No nodes selected!') + return + for node in nodes: + yield self.manifest.nodes[node] + + def generate_selectors(self): + for node in self._iterate_selected_nodes(): + if 
node.resource_type == NodeType.Source: + yield 'source:{}'.format(node.unique_id) + else: + yield node.unique_id + + def generate_names(self): + for node in self._iterate_selected_nodes(): + if node.resource_type == NodeType.Source: + yield '{0.source_name}.{0.name}'.format(node) + else: + yield node.name + + def generate_json(self): + for node in self._iterate_selected_nodes(): + yield json.dumps({ + k: v + for k, v in node.serialize().items() + if k in self.ALLOWED_KEYS + }) + + def generate_paths(self): + for node in self._iterate_selected_nodes(): + yield node.get('original_file_path') + + def run(self): + ManifestTask._runtime_initialize(self) + output = self.config.args.output + if output == 'selector': + generator = self.generate_selectors + elif output == 'name': + generator = self.generate_names + elif output == 'json': + generator = self.generate_json + elif output == 'path': + generator = self.generate_paths + else: + raise dbt.exceptions.InternalException( + 'Invalid output {}'.format(output) + ) + for result in generator(): + self.node_results.append(result) + print(result) + return self.node_results + + @property + def resource_types(self): + values = set(self.config.args.resource_types) + if not values: + return list(self.DEFAULT_RESOURCE_VALUES) + + if 'default' in values: + values.remove('default') + values.update(self.DEFAULT_RESOURCE_VALUES) + if 'all' in values: + values.remove('all') + values.update(self.ALL_RESOURCE_VALUES) + return list(values) + + def build_query(self): + return { + "include": self.args.models, + "exclude": self.args.exclude, + "resource_types": self.resource_types, + "tags": [], + } + + def interpret_results(self, results): + return bool(results) diff --git a/test/integration/047_dbt_ls_test/analyses/analysis.sql b/test/integration/047_dbt_ls_test/analyses/analysis.sql new file mode 100644 index 00000000000..1f7d87c55a9 --- /dev/null +++ b/test/integration/047_dbt_ls_test/analyses/analysis.sql @@ -0,0 +1 @@ +select 4 as id 
diff --git a/test/integration/047_dbt_ls_test/archives/archive.sql b/test/integration/047_dbt_ls_test/archives/archive.sql new file mode 100644 index 00000000000..c609604eec6 --- /dev/null +++ b/test/integration/047_dbt_ls_test/archives/archive.sql @@ -0,0 +1,12 @@ +{% archive my_archive %} + {{ + config( + target_database=var('target_database', database), + target_schema=schema, + unique_key='id', + strategy='timestamp', + updated_at='updated_at', + ) + }} + select * from {{database}}.{{schema}}.seed +{% endarchive %} diff --git a/test/integration/047_dbt_ls_test/data/seed.csv b/test/integration/047_dbt_ls_test/data/seed.csv new file mode 100644 index 00000000000..cfa20f81071 --- /dev/null +++ b/test/integration/047_dbt_ls_test/data/seed.csv @@ -0,0 +1,2 @@ +a,b +1,2 diff --git a/test/integration/047_dbt_ls_test/macros/macro_stuff.sql b/test/integration/047_dbt_ls_test/macros/macro_stuff.sql new file mode 100644 index 00000000000..b05216d059a --- /dev/null +++ b/test/integration/047_dbt_ls_test/macros/macro_stuff.sql @@ -0,0 +1,7 @@ +{% macro cool_macro() %} + wow! +{% endmacro %} + +{% macro other_cool_macro(a, b) %} + cool! 
+{% endmacro %} diff --git a/test/integration/047_dbt_ls_test/models/docs.md b/test/integration/047_dbt_ls_test/models/docs.md new file mode 100644 index 00000000000..e658f3b11a9 --- /dev/null +++ b/test/integration/047_dbt_ls_test/models/docs.md @@ -0,0 +1,3 @@ +{% docs my_docs %} + some docs +{% enddocs %} diff --git a/test/integration/047_dbt_ls_test/models/outer.sql b/test/integration/047_dbt_ls_test/models/outer.sql new file mode 100644 index 00000000000..43258a71464 --- /dev/null +++ b/test/integration/047_dbt_ls_test/models/outer.sql @@ -0,0 +1 @@ +select 1 as id diff --git a/test/integration/047_dbt_ls_test/models/schema.yml b/test/integration/047_dbt_ls_test/models/schema.yml new file mode 100644 index 00000000000..8cfdf833aae --- /dev/null +++ b/test/integration/047_dbt_ls_test/models/schema.yml @@ -0,0 +1,15 @@ +version: 2 +models: + - name: outer + description: The outer table + columns: + - name: id + description: The id value + tests: + - unique + - not_null + +sources: + - name: my_source + tables: + - name: my_table diff --git a/test/integration/047_dbt_ls_test/models/sub/inner.sql b/test/integration/047_dbt_ls_test/models/sub/inner.sql new file mode 100644 index 00000000000..a90004d480d --- /dev/null +++ b/test/integration/047_dbt_ls_test/models/sub/inner.sql @@ -0,0 +1 @@ +select * from {{ ref('outer') }} diff --git a/test/integration/047_dbt_ls_test/test_ls.py b/test/integration/047_dbt_ls_test/test_ls.py new file mode 100644 index 00000000000..b31ed4688bd --- /dev/null +++ b/test/integration/047_dbt_ls_test/test_ls.py @@ -0,0 +1,299 @@ +from test.integration.base import DBTIntegrationTest, use_profile +from dbt.logger import log_to_stdout, GLOBAL_LOGGER + +import json +import os + + +class TestStrictUndefined(DBTIntegrationTest): + + @property + def schema(self): + return 'dbt_ls_047' + + @staticmethod + def dir(value): + return os.path.normpath('test/integration/047_dbt_ls_test/' + value) + + @property + def models(self): + return 
self.dir('models') + + @property + def project_config(self): + return { + 'analysis-paths': [self.dir('analyses')], + 'archive-paths': [self.dir('archives')], + 'macro-paths': [self.dir('macros')], + 'data-paths': [self.dir('data')], + } + + def run_dbt_ls(self, args=None, expect_pass=True): + log_to_stdout(GLOBAL_LOGGER) + full_args = ['ls'] + if args is not None: + full_args = full_args + args + + result = self.run_dbt(args=full_args, expect_pass=expect_pass, + strict=False, parser=False) + + log_to_stdout(GLOBAL_LOGGER) + return result + + def assertEqualJSON(self, json_str, expected): + self.assertEqual(json.loads(json_str), expected) + + def expect_given_output(self, args, expectations): + for key, values in expectations.items(): + ls_result = self.run_dbt_ls(args + ['--output', key]) + if not isinstance(values, (list, tuple)): + values = [values] + self.assertEqual(len(ls_result), len(values)) + for got, expected in zip(ls_result, values): + if key == 'json': + self.assertEqualJSON(got, expected) + else: + self.assertEqual(got, expected) + + def expect_archive_output(self): + expectations = { + 'name': 'my_archive', + 'selector': 'archive.test.my_archive', + 'json': { + 'name': 'my_archive', + 'package_name': 'test', + 'depends_on': {'nodes': [], 'macros': []}, + 'tags': [], + 'config': { + 'enabled': True, + 'materialized': 'archive', + 'post-hook': [], + 'tags': [], + 'pre-hook': [], + 'quoting': {}, + 'vars': {}, + 'column_types': {}, + 'target_database': self.default_database, + 'target_schema': self.unique_schema(), + 'unique_key': 'id', + 'strategy': 'timestamp', + 'updated_at': 'updated_at' + }, + 'alias': 'my_archive', + 'resource_type': 'archive', + }, + 'path': self.dir('archives/archive.sql'), + } + self.expect_given_output(['--resource-type', 'archive'], expectations) + + def expect_analyses_output(self): + expectations = { + 'name': 'analysis', + 'selector': 'analysis.test.analysis', + 'json': { + 'name': 'analysis', + 'package_name': 'test', + 
'depends_on': {'nodes': [], 'macros': []}, + 'tags': [], + 'config': { + 'enabled': True, + 'materialized': 'view', + 'post-hook': [], + 'tags': [], + 'pre-hook': [], + 'quoting': {}, + 'vars': {}, + 'column_types': {}, + }, + 'alias': 'analysis', + 'resource_type': 'analysis', + }, + 'path': self.dir('analyses/analysis.sql'), + } + self.expect_given_output(['--resource-type', 'analysis'], expectations) + + def expect_model_output(self): + expectations = { + 'name': ('inner', 'outer'), + 'selector': ('model.test.inner', 'model.test.outer'), + 'json': ( + { + 'name': 'inner', + 'package_name': 'test', + 'depends_on': {'nodes': ['model.test.outer'], 'macros': []}, + 'tags': [], + 'config': { + 'enabled': True, + 'materialized': 'view', + 'post-hook': [], + 'tags': [], + 'pre-hook': [], + 'quoting': {}, + 'vars': {}, + 'column_types': {}, + }, + 'alias': 'inner', + 'resource_type': 'model', + }, + { + 'name': 'outer', + 'package_name': 'test', + 'depends_on': {'nodes': [], 'macros': []}, + 'tags': [], + 'config': { + 'enabled': True, + 'materialized': 'view', + 'post-hook': [], + 'tags': [], + 'pre-hook': [], + 'quoting': {}, + 'vars': {}, + 'column_types': {}, + }, + 'alias': 'outer', + 'resource_type': 'model', + }, + ), + 'path': (self.dir('models/sub/inner.sql'), self.dir('models/outer.sql')), + } + self.expect_given_output(['--resource-type', 'model'], expectations) + + def expect_source_output(self): + expectations = { + 'name': 'my_source.my_table', + 'selector': 'source:source.test.my_source.my_table', + 'json': { + 'package_name': 'test', + 'name': 'my_table', + 'source_name': 'my_source', + 'resource_type': 'source', + }, + 'path': self.dir('models/schema.yml'), + } + # should we do this --select automatically for a user if if 'source' is + # in the resource types and there is no '--select' or '--exclude'? 
+ self.expect_given_output(['--resource-type', 'source', '--select', 'source:*'], expectations) + + def expect_seed_output(self): + expectations = { + 'name': 'seed', + 'selector': 'seed.test.seed', + 'json': { + 'name': 'seed', + 'package_name': 'test', + 'depends_on': {'nodes': [], 'macros': []}, + 'tags': [], + 'config': { + 'enabled': True, + 'materialized': 'seed', + 'post-hook': [], + 'tags': [], + 'pre-hook': [], + 'quoting': {}, + 'vars': {}, + 'column_types': {}, + }, + 'alias': 'seed', + 'resource_type': 'seed', + }, + 'path': self.dir('data/seed.csv'), + } + self.expect_given_output(['--resource-type', 'seed'], expectations) + + def expect_test_output(self): + expectations = { + 'name': ('not_null_outer_id', 'unique_outer_id'), + 'selector': ('test.test.not_null_outer_id', 'test.test.unique_outer_id'), + 'json': ( + { + 'name': 'not_null_outer_id', + 'package_name': 'test', + 'depends_on': {'nodes': ['model.test.outer'], 'macros': []}, + 'tags': ['schema'], + 'config': { + 'enabled': True, + 'materialized': 'view', + 'post-hook': [], + 'severity': 'ERROR', + 'tags': [], + 'pre-hook': [], + 'quoting': {}, + 'vars': {}, + 'column_types': {}, + }, + 'alias': 'not_null_outer_id', + 'resource_type': 'test', + + }, + { + 'name': 'unique_outer_id', + 'package_name': 'test', + 'depends_on': {'nodes': ['model.test.outer'], 'macros': []}, + 'tags': ['schema'], + 'config': { + 'enabled': True, + 'materialized': 'view', + 'post-hook': [], + 'severity': 'ERROR', + 'tags': [], + 'pre-hook': [], + 'quoting': {}, + 'vars': {}, + 'column_types': {}, + }, + 'alias': 'unique_outer_id', + 'resource_type': 'test', + }, + ), + 'path': (self.dir('models/schema.yml'), self.dir('models/schema.yml')), + } + self.expect_given_output(['--resource-type', 'test'], expectations) + + def expect_all_output(self): + expected_default = { + 'archive.test.my_archive', + 'model.test.inner', + 'model.test.outer', + 'seed.test.seed', + 'source:source.test.my_source.my_table', + 
'test.test.not_null_outer_id', + 'test.test.unique_outer_id', + } + expected_all = expected_default | {'analysis.test.analysis'} + + results = self.run_dbt_ls(['--resource-type', 'all', '--select', '*', 'source:*']) + self.assertEqual(set(results), expected_all) + + results = self.run_dbt_ls(['--select', '*', 'source:*']) + self.assertEqual(set(results), expected_default) + + results = self.run_dbt_ls(['--resource-type', 'default', '--select', '*', 'source:*']) + self.assertEqual(set(results), expected_default) + + results = self.run_dbt_ls + + def expect_select(self): + results = self.run_dbt_ls(['--resource-type', 'test', '--select', 'outer']) + self.assertEqual(set(results), {'test.test.not_null_outer_id', 'test.test.unique_outer_id'}) + + self.run_dbt_ls(['--resource-type', 'test', '--select', 'inner'], expect_pass=False) + + results = self.run_dbt_ls(['--resource-type', 'test', '--select', '+inner']) + self.assertEqual(set(results), {'test.test.not_null_outer_id', 'test.test.unique_outer_id'}) + + results = self.run_dbt_ls(['--resource-type', 'model', '--select', 'outer+']) + self.assertEqual(set(results), {'model.test.outer', 'model.test.inner'}) + + results = self.run_dbt_ls(['--resource-type', 'model', '--exclude', 'inner']) + self.assertEqual(set(results), {'model.test.outer'}) + + @use_profile('postgres') + def test_postgres_ls(self): + self.expect_archive_output() + self.expect_analyses_output() + self.expect_model_output() + self.expect_source_output() + self.expect_seed_output() + self.expect_test_output() + self.expect_select() + self.expect_all_output() From 32c567903922015e31ff2f4b0e04348f3eb65b21 Mon Sep 17 00:00:00 2001 From: Jacob Beck Date: Tue, 7 May 2019 07:57:06 -0600 Subject: [PATCH 3/4] PR Feedback Fixed error logging to display errors in dbt ls Add models flag Make all of models, select, exclude have a metavar of 'SELECTOR' for -h --- core/dbt/logger.py | 2 +- core/dbt/main.py | 17 ++++++++++++++--- core/dbt/task/list.py | 24 
++++++++++++++++++++++-- 3 files changed, 37 insertions(+), 6 deletions(-) diff --git a/core/dbt/logger.py b/core/dbt/logger.py index a1b3e590bad..7ec7426c619 100644 --- a/core/dbt/logger.py +++ b/core/dbt/logger.py @@ -50,7 +50,7 @@ def notice(self, msg, *args, **kwargs): stdout_handler.setFormatter(logging.Formatter('%(message)s')) stdout_handler.setLevel(NOTICE) -stderr_handler = logging.StreamHandler() +stderr_handler = logging.StreamHandler(sys.stderr) stderr_handler.setFormatter(logging.Formatter('%(message)s')) stderr_handler.setLevel(WARNING) diff --git a/core/dbt/main.py b/core/dbt/main.py index f270ed3b21d..18f1dacd9b6 100644 --- a/core/dbt/main.py +++ b/core/dbt/main.py @@ -93,8 +93,8 @@ def main(args=None): exit_code = e.code except BaseException as e: - logger.info("Encountered an error:") - logger.info(str(e)) + logger.warn("Encountered an error:") + logger.warn(str(e)) if logger_initialized(): logger.debug(traceback.format_exc()) @@ -592,13 +592,24 @@ def _build_list_subparser(subparsers, base_subparser): '--select', required=False, nargs='+', + metavar='SELECTOR', help="Specify the nodes to select.", - dest='models' + ) + sub.add_argument( + '-m', + '--models', + required=False, + nargs='+', + metavar='SELECTOR', + help="Specify the models to select and set the resource-type to " + "'model'. Mutually exclusive with '--select' (or '-s') and " + "'--resource-type'", ) sub.add_argument( '--exclude', required=False, nargs='+', + metavar='SELECTOR', help="Specify the models to exclude." 
) # in python 3.x you can use the 'aliases' kwarg, but in python 2.7 you get diff --git a/core/dbt/task/list.py b/core/dbt/task/list.py index b4095b46c0b..55204454268 100644 --- a/core/dbt/task/list.py +++ b/core/dbt/task/list.py @@ -33,7 +33,17 @@ class ListTask(GraphRunnableTask): def __init__(self, args, config): super(ListTask, self).__init__(args, config) - self.config.args.single_threaded = True + self.args.single_threaded = True + if self.args.models: + if self.args.select: + raise dbt.exceptions.RuntimeException( + '"models" and "select" are mutually exclusive arguments' + ) + if self.args.resource_types: + raise dbt.exceptions.RuntimeException( + '"models" and "resource_type" are mutually exclusive ' + 'arguments' + ) @classmethod def pre_init_hook(cls): @@ -96,6 +106,9 @@ def run(self): @property def resource_types(self): + if self.args.models: + return [NodeType.Model] + values = set(self.config.args.resource_types) if not values: return list(self.DEFAULT_RESOURCE_VALUES) @@ -108,9 +121,16 @@ def resource_types(self): values.update(self.ALL_RESOURCE_VALUES) return list(values) + @property + def selector(self): + if self.args.models: + return self.args.models + else: + return self.args.select + def build_query(self): return { - "include": self.args.models, + "include": self.selector, "exclude": self.args.exclude, "resource_types": self.resource_types, "tags": [], From 8e426e60c9b6d40425b805ebe2d10ce9d522a285 Mon Sep 17 00:00:00 2001 From: Jacob Beck Date: Thu, 9 May 2019 10:40:46 -0600 Subject: [PATCH 4/4] remove completed TODO --- core/dbt/parser/base_sql.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/core/dbt/parser/base_sql.py b/core/dbt/parser/base_sql.py index e359d2c67b7..99a280758a0 100644 --- a/core/dbt/parser/base_sql.py +++ b/core/dbt/parser/base_sql.py @@ -47,11 +47,6 @@ def load_and_parse(self, package_name, root_dir, relative_dirs, path = self.get_compiled_path(name, file_match.get('relative_path')) - # TODO(jeb): Why would the 
original file path rely on the compiled - # path? - # original_file_path = os.path.join( - # file_match.get('searched_path'), - # path) original_file_path = os.path.join( file_match.get('searched_path'), file_match.get('relative_path'))