Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

adds additional augmented assignment statements (#4315) #4331

Merged
merged 3 commits into from
Nov 27, 2021
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 4 additions & 1 deletion CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,10 @@
### Under the hood
- Change some CompilationExceptions to ParsingExceptions ([#4254](https:/dbt-labs/dbt-core/issues/4254), [#4328](https:/dbt-labs/dbt-core/pull/4328))
- Reorder logic for static parser sampling to speed up model parsing ([#4332](https:/dbt-labs/dbt-core/pull/4332))
- Use more augmented assignment statements ([#4315](https:/dbt-labs/dbt-core/issues/4315)), ([#4331](https:/dbt-labs/dbt-core/pull/4331))

Contributors:
[@sarah-weatherbee](https:/sarah-weatherbee) ([#4331](https:/dbt-labs/dbt-core/pull/4331))

## dbt-core 1.0.0rc2 (November 22, 2021)

Expand Down Expand Up @@ -105,7 +108,7 @@ Contributors:
- Make finding disabled nodes more consistent ([#4069](https:/dbt-labs/dbt-core/issues/4069), [#4073](https:/dbt-labs/dbt-core/pull/4073))
- Remove connection from `render_with_context` during parsing, thereby removing misleading log message ([#3137](https:/dbt-labs/dbt-core/issues/3137), [#4062](https:/dbt-labs/dbt-core/pull/4062))
- Wait for postgres docker container to be ready in `setup_db.sh`. ([#3876](https:/dbt-labs/dbt-core/issues/3876), [#3908](https:/dbt-labs/dbt-core/pull/3908))
- Prefer macros defined in the project over the ones in a package by default ([#4106](https:/dbt-labs/dbt-core/issues/4106), [#4114](https:/dbt-labs/dbt-core/pull/4114))
- Prefer macros defined in the project over the ones in a package by default ([#4106](https:/dbt-labs/dbt-core/issues/4106), [#4114](https:/dbt-labs/dbt-core/pull/4114))
- Dependency updates ([#4079](https:/dbt-labs/dbt-core/pull/4079)), ([#3532](https:/dbt-labs/dbt-core/pull/3532))
- Schedule partial parsing for SQL files with env_var changes ([#3885](https:/dbt-labs/dbt-core/issues/3885), [#4101](https:/dbt-labs/dbt-core/pull/4101))
- Schedule partial parsing for schema files with env_var changes ([#3885](https:/dbt-labs/dbt-core/issues/3885), [#4162](https:/dbt-labs/dbt-core/pull/4162))
Expand Down
2 changes: 1 addition & 1 deletion core/dbt/dataclass_schema.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@ def serialize(self, value):
out = value.isoformat()
# Assume UTC if timezone is missing
if value.tzinfo is None:
out = out + "Z"
out += "Z"
return out

def deserialize(self, value):
Expand Down
2 changes: 1 addition & 1 deletion core/dbt/graph/selector_spec.py
Original file line number Diff line number Diff line change
Expand Up @@ -149,7 +149,7 @@ def dict_from_single_spec(cls, raw: str):
method_name, method_arguments = cls.parse_method(dct)
meth_name = str(method_name)
if method_arguments:
meth_name = meth_name + '.' + '.'.join(method_arguments)
meth_name += '.' + '.'.join(method_arguments)
dct['method'] = meth_name
dct = {k: v for k, v in dct.items() if (v is not None and v != '')}
if 'childrens_parents' in dct:
Expand Down
14 changes: 7 additions & 7 deletions core/dbt/parser/manifest.py
Original file line number Diff line number Diff line change
Expand Up @@ -398,7 +398,7 @@ def load_and_parse_macros(self, project_parser_files):
block = FileBlock(self.manifest.files[file_id])
parser.parse_file(block)
# increment parsed path count for performance tracking
self._perf_info.parsed_path_count = self._perf_info.parsed_path_count + 1
self._perf_info.parsed_path_count += 1
# generic tests historically lived in the macros directory but can now be nested
# in a /generic directory under /tests so we want to process them here as well
if 'GenericTestParser' in parser_files:
Expand All @@ -407,7 +407,7 @@ def load_and_parse_macros(self, project_parser_files):
block = FileBlock(self.manifest.files[file_id])
parser.parse_file(block)
# increment parsed path count for performance tracking
self._perf_info.parsed_path_count = self._perf_info.parsed_path_count + 1
self._perf_info.parsed_path_count += 1

self.build_macro_resolver()
# Look at changed macros and update the macro.depends_on.macros
Expand Down Expand Up @@ -450,15 +450,15 @@ def parse_project(
parser.parse_file(block, dct=dct)
else:
parser.parse_file(block)
project_parsed_path_count = project_parsed_path_count + 1
project_parsed_path_count += 1

# Save timing info
project_loader_info.parsers.append(ParserInfo(
parser=parser.resource_type,
parsed_path_count=project_parsed_path_count,
elapsed=time.perf_counter() - parser_start_timer
))
total_parsed_path_count = total_parsed_path_count + project_parsed_path_count
total_parsed_path_count += project_parsed_path_count

# HookParser doesn't run from loaded files, just dbt_project.yml,
# so do separately
Expand All @@ -478,7 +478,7 @@ def parse_project(
project_loader_info.parsed_path_count = (
project_loader_info.parsed_path_count + total_parsed_path_count
)
project_loader_info.elapsed = project_loader_info.elapsed + elapsed
project_loader_info.elapsed += elapsed
self._perf_info.parsed_path_count = (
self._perf_info.parsed_path_count + total_parsed_path_count
)
Expand Down Expand Up @@ -687,15 +687,15 @@ def build_manifest_state_check(self):
key_list.sort()
env_var_str = ''
for key in key_list:
env_var_str = env_var_str + f'{key}:{config.project_env_vars[key]}|'
env_var_str += f'{key}:{config.project_env_vars[key]}|'
project_env_vars_hash = FileHash.from_contents(env_var_str)

# Create a FileHash of the env_vars in the project
key_list = list(config.profile_env_vars.keys())
key_list.sort()
env_var_str = ''
for key in key_list:
env_var_str = env_var_str + f'{key}:{config.profile_env_vars[key]}|'
env_var_str += f'{key}:{config.profile_env_vars[key]}|'
profile_env_vars_hash = FileHash.from_contents(env_var_str)

# Create a FileHash of the profile file
Expand Down
12 changes: 6 additions & 6 deletions test/integration/047_dbt_ls_test/test_ls.py
Original file line number Diff line number Diff line change
Expand Up @@ -42,7 +42,7 @@ def run_dbt_ls(self, args=None, expect_pass=True):
log_manager.stdout_console()
full_args = ['ls']
if args is not None:
full_args = full_args + args
full_args += args

result = self.run_dbt(args=full_args, expect_pass=expect_pass)

Expand Down Expand Up @@ -484,21 +484,21 @@ def expect_select(self):
self.assertEqual(set(results), {'test.incremental'})

self.run_dbt_ls(['--select', 'config.incremental_strategy:insert_overwrite'], expect_pass=True)

def expect_selected_keys(self):
"""Expect selected fields of the the selected model
"""
expectations = [{
'database': self.default_database,
'schema': self.unique_schema(),
'database': self.default_database,
'schema': self.unique_schema(),
'alias': 'inner'
}]
results = self.run_dbt_ls(['--model', 'inner', '--output', 'json', '--output-keys', 'database,schema,alias'])
self.assertEqual(len(results), len(expectations))

for got, expected in zip(results, expectations):
self.assertEqualJSON(got, expected)

"""Expect selected fields of the test resource types
"""
expectations = [
Expand All @@ -510,7 +510,7 @@ def expect_selected_keys(self):
self.assertEqual(len(results), len(expectations))

for got, expected in zip(
sorted(results, key=lambda x: json.loads(x).get("name")),
sorted(results, key=lambda x: json.loads(x).get("name")),
sorted(expectations, key=lambda x: x.get("name"))
):
self.assertEqualJSON(got, expected)
Expand Down
10 changes: 5 additions & 5 deletions test/unit/test_macro_calls.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,16 +17,16 @@ def setUp(self):
"{% macro lr_macro() %} {{ return(load_result('relations').table) }} {% endmacro %}",
"{% macro get_snapshot_unique_id() -%} {{ return(adapter.dispatch('get_snapshot_unique_id')()) }} {%- endmacro %}",
"{% macro get_columns_in_query(select_sql) -%} {{ return(adapter.dispatch('get_columns_in_query')(select_sql)) }} {% endmacro %}",
"""{% macro test_mutually_exclusive_ranges(model) %}
with base as (
"""{% macro test_mutually_exclusive_ranges(model) %}
with base as (
select {{ get_snapshot_unique_id() }} as dbt_unique_id,
*
*
from {{ model }} )
{% endmacro %}""",
"{% macro test_my_test(model) %} select {{ dbt_utils.current_timestamp() }} {% endmacro %}",
"{% macro some_test(model) -%} {{ return(adapter.dispatch('test_some_kind4', 'foo_utils4')) }} {%- endmacro %}",
"{% macro some_test(model) -%} {{ return(adapter.dispatch('test_some_kind5', macro_namespace = 'foo_utils5')) }} {%- endmacro %}",
]
]

self.possible_macro_calls = [
['nested_macro'],
Expand All @@ -47,6 +47,6 @@ def test_macro_calls(self):
for macro_string in self.macro_strings:
possible_macro_calls = statically_extract_macro_calls(macro_string, ctx)
self.assertEqual(self.possible_macro_calls[index], possible_macro_calls)
index = index + 1
index += 1